1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass generates an entirely new schedule tree from the data dependences
10 // and iteration domains. The new schedule tree is computed in two steps:
11 //
12 // 1) The isl scheduling optimizer is run
13 //
14 // The isl scheduling optimizer creates a new schedule tree that maximizes
15 // parallelism and tileability and minimizes data-dependence distances. The
16 // algorithm used is a modified version of the ``Pluto'' algorithm:
17 //
18 //   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
19 //   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
20 //   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
21 //   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
22 //
23 // 2) A set of post-scheduling transformations is applied on the schedule tree.
24 //
25 // These optimizations include:
26 //
27 //  - Tiling of the innermost tilable bands
28 //  - Prevectorization - The choice of a possible outer loop that is strip-mined
29 //                       to the innermost level to enable inner-loop
30 //                       vectorization.
31 //  - Some optimizations for spatial locality are also planned.
32 //
33 // For a detailed description of the schedule tree itself please see section 6
34 // of:
35 //
36 // Polyhedral AST generation is more than scanning polyhedra
37 // Tobias Grosser, Sven Verdoolaege, Albert Cohen
38 // ACM Transactions on Programming Languages and Systems (TOPLAS),
39 // 37(4), July 2015
40 // http://www.grosser.es/#pub-polyhedral-AST-generation
41 //
42 // This publication also contains a detailed discussion of the different options
43 // for polyhedral loop unrolling, full/partial tile separation and other uses
44 // of the schedule tree.
45 //
46 //===----------------------------------------------------------------------===//
47 
48 #include "polly/ScheduleOptimizer.h"
49 #include "polly/CodeGen/CodeGeneration.h"
50 #include "polly/DependenceInfo.h"
51 #include "polly/LinkAllPasses.h"
52 #include "polly/Options.h"
53 #include "polly/ScheduleTreeTransform.h"
54 #include "polly/ScopInfo.h"
55 #include "polly/ScopPass.h"
56 #include "polly/Simplify.h"
57 #include "polly/Support/ISLOStream.h"
58 #include "llvm/ADT/Statistic.h"
59 #include "llvm/Analysis/TargetTransformInfo.h"
60 #include "llvm/IR/Function.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "isl/ctx.h"
65 #include "isl/options.h"
66 #include "isl/printer.h"
67 #include "isl/schedule.h"
68 #include "isl/schedule_node.h"
69 #include "isl/union_map.h"
70 #include "isl/union_set.h"
71 #include <algorithm>
72 #include <cassert>
73 #include <cmath>
74 #include <cstdint>
75 #include <cstdlib>
76 #include <string>
77 #include <vector>
78 
79 using namespace llvm;
80 using namespace polly;
81 
82 #define DEBUG_TYPE "polly-opt-isl"
83 
// Scheduler options: knobs for the isl scheduling algorithm itself (step 1 in
// the file header comment). All of them are hidden developer options.

// Restrict which kind of dependences the scheduler optimizes for.
static cl::opt<std::string>
    OptimizeDeps("polly-opt-optimize-only",
                 cl::desc("Only a certain kind of dependences (all/raw)"),
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

// Whether dependences should be simplified before scheduling.
static cl::opt<std::string>
    SimplifyDeps("polly-opt-simplify-deps",
                 cl::desc("Dependences should be simplified (yes/no)"),
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

// Upper bound on constant terms in scheduling constraints (-1 = unlimited).
static cl::opt<int> MaxConstantTerm(
    "polly-opt-max-constant-term",
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

// Upper bound on coefficients in scheduling constraints (-1 = unlimited).
static cl::opt<int> MaxCoefficient(
    "polly-opt-max-coefficient",
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

// Loop fusion strategy to hand to the isl scheduler.
static cl::opt<std::string> FusionStrategy(
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Whether the scheduler should try to build bands of maximal depth.
static cl::opt<std::string>
    MaximizeBandDepth("polly-opt-maximize-bands",
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Prefer schedules whose outermost band member is coincident (parallel).
static cl::opt<std::string> OuterCoincidence(
    "polly-opt-outer-coincidence",
    cl::desc("Try to construct schedules where the outer member of each band "
             "satisfies the coincidence constraints (yes/no)"),
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Strip-mine factor used by prevectSchedBand when preparing a loop for
// inner-loop vectorization.
static cl::opt<int> PrevectorWidth(
    "polly-prevect-width",
    cl::desc(
        "The number of loop iterations to strip-mine for pre-vectorization"),
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));

// Master switch for first-level loop tiling (on by default).
static cl::opt<bool> FirstLevelTiling("polly-tiling",
                                      cl::desc("Enable loop tiling"),
                                      cl::init(true), cl::ZeroOrMore,
                                      cl::cat(PollyCategory));

// Target model parameter: latency (in cycles) between dependent vector FMAs.
static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

// Target model parameter: vector FMAs the FPU can issue per cycle.
static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("A throughput of the processor floating-point arithmetic units "
             "expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
145 
// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size
// represent the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the values of the parameters of Intel Core i7-3820
// SandyBridge in case the parameters are not specified or not provided by the
// TargetTransformInfo.
//
// For each cache parameter there are two options: the plain option (default
// -1, i.e. unspecified) and a *-default-* companion that supplies the
// SandyBridge fallback value described above.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelDefaultAssociativity(
    "polly-target-1st-cache-level-default-associativity",
    cl::desc("The default associativity of the first cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelDefaultAssociativity(
    "polly-target-2nd-cache-level-default-associativity",
    cl::desc("The default associativity of the second cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelDefaultSize(
    "polly-target-1st-cache-level-default-size",
    cl::desc("The default size of the first cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelDefaultSize(
    "polly-target-2nd-cache-level-default-size",
    cl::desc("The default size of the second cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));
196 
197 static cl::opt<int> VectorRegisterBitwidth(
198     "polly-target-vector-register-bitwidth",
199     cl::desc("The size in bits of a vector register (if not set, this "
200              "information is taken from LLVM's target information."),
201     cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
202 
// Tiling options: per-dimension tile sizes for the three tiling levels
// (first-level, second-level, register), each with a scalar default used to
// pad the per-dimension list.

// Fallback tile size for dimensions not covered by --polly-tile-sizes.
static cl::opt<int> FirstLevelDefaultTileSize(
    "polly-default-tile-size",
    cl::desc("The default tile size (if not enough were provided by"
             " --polly-tile-sizes)"),
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));

// Explicit per-dimension tile sizes for first-level tiling.
static cl::list<int>
    FirstLevelTileSizes("polly-tile-sizes",
                        cl::desc("A tile size for each loop dimension, filled "
                                 "with --polly-default-tile-size"),
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                        cl::cat(PollyCategory));

// Master switch for second-level tiling (off by default).
static cl::opt<bool>
    SecondLevelTiling("polly-2nd-level-tiling",
                      cl::desc("Enable a 2nd level loop of loop tiling"),
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

// Fallback tile size for dimensions not covered by
// --polly-2nd-level-tile-sizes.
static cl::opt<int> SecondLevelDefaultTileSize(
    "polly-2nd-level-default-tile-size",
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
             " --polly-2nd-level-tile-sizes)"),
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));

// Explicit per-dimension tile sizes for second-level tiling.
static cl::list<int>
    SecondLevelTileSizes("polly-2nd-level-tile-sizes",
                         cl::desc("A tile size for each loop dimension, filled "
                                  "with --polly-default-tile-size"),
                         cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                         cl::cat(PollyCategory));

// Master switch for register tiling (off by default).
static cl::opt<bool> RegisterTiling("polly-register-tiling",
                                    cl::desc("Enable register tiling"),
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::cat(PollyCategory));

// Fallback tile size for dimensions not covered by
// --polly-register-tile-sizes.
static cl::opt<int> RegisterDefaultTileSize(
    "polly-register-tiling-default-tile-size",
    cl::desc("The default register tile size (if not enough were provided by"
             " --polly-register-tile-sizes)"),
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));

// Parameter of the matrix-multiplication pattern optimization (Nc / Nr).
static cl::opt<int> PollyPatternMatchingNcQuotient(
    "polly-pattern-matching-nc-quotient",
    cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
             "macro-kernel, by Nr, the parameter of the micro-kernel"),
    cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));

// Explicit per-dimension tile sizes for register tiling.
static cl::list<int>
    RegisterTileSizes("polly-register-tile-sizes",
                      cl::desc("A tile size for each loop dimension, filled "
                               "with --polly-register-tile-size"),
                      cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                      cl::cat(PollyCategory));

// Master switch for pattern-matching-based optimizations (on by default).
static cl::opt<bool>
    PMBasedOpts("polly-pattern-matching-based-opts",
                cl::desc("Perform optimizations based on pattern matching"),
                cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Dump the polyhedral description of SCoPs after optimization.
static cl::opt<bool> OptimizedScops(
    "polly-optimized-scops",
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
             "the isl scheduling optimizer and the set of post-scheduling "
             "transformations is applied on the schedule tree"),
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
269 
// Pass-level counters.
STATISTIC(ScopsProcessed, "Number of scops processed");
STATISTIC(ScopsRescheduled, "Number of scops rescheduled");
STATISTIC(ScopsOptimized, "Number of scops optimized");

STATISTIC(NumAffineLoopsOptimized, "Number of affine loops optimized");
STATISTIC(NumBoxedLoopsOptimized, "Number of boxed loops optimized");

// Declare an array of three Statistic counters recording the same quantity at
// three pipeline stages: [0] the original schedule, [1] after the isl
// scheduler ran, and [2] after the post-scheduling optimizer ran. The
// invocation site supplies the terminating semicolon.
#define THREE_STATISTICS(VARNAME, DESC)                                        \
  static Statistic VARNAME[3] = {                                              \
      {DEBUG_TYPE, #VARNAME "0", DESC " (original)", {0}, {false}},            \
      {DEBUG_TYPE, #VARNAME "1", DESC " (after scheduler)", {0}, {false}},     \
      {DEBUG_TYPE, #VARNAME "2", DESC " (after optimizer)", {0}, {false}}}

// Schedule-tree shape statistics, sampled at the three stages above.
THREE_STATISTICS(NumBands, "Number of bands");
THREE_STATISTICS(NumBandMembers, "Number of band members");
THREE_STATISTICS(NumCoincident, "Number of coincident band members");
THREE_STATISTICS(NumPermutable, "Number of permutable bands");
THREE_STATISTICS(NumFilters, "Number of filter nodes");
THREE_STATISTICS(NumExtension, "Number of extension nodes");

// Per-transformation counters (incremented where the transformation fires).
STATISTIC(FirstLevelTileOpts, "Number of first level tiling applied");
STATISTIC(SecondLevelTileOpts, "Number of second level tiling applied");
STATISTIC(RegisterTileOpts, "Number of register tiling applied");
STATISTIC(PrevectOpts, "Number of strip-mining for prevectorization applied");
STATISTIC(MatMulOpts,
          "Number of matrix multiplication patterns detected and optimized");
296 
297 /// Create an isl::union_set, which describes the isolate option based on
298 /// IsolateDomain.
299 ///
300 /// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should
301 ///                      belong to the current band node.
302 /// @param OutDimsNum    A number of dimensions that should belong to
303 ///                      the current band node.
304 static isl::union_set getIsolateOptions(isl::set IsolateDomain,
305                                         unsigned OutDimsNum) {
306   unsigned Dims = IsolateDomain.dim(isl::dim::set);
307   assert(OutDimsNum <= Dims &&
308          "The isl::set IsolateDomain is used to describe the range of schedule "
309          "dimensions values, which should be isolated. Consequently, the "
310          "number of its dimensions should be greater than or equal to the "
311          "number of the schedule dimensions.");
312   isl::map IsolateRelation = isl::map::from_domain(IsolateDomain);
313   IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in,
314                                               Dims - OutDimsNum, OutDimsNum);
315   isl::set IsolateOption = IsolateRelation.wrap();
316   isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr);
317   IsolateOption = IsolateOption.set_tuple_id(Id);
318   return isl::union_set(IsolateOption);
319 }
320 
321 namespace {
322 /// Create an isl::union_set, which describes the specified option for the
323 /// dimension of the current node.
324 ///
325 /// @param Ctx    An isl::ctx, which is used to create the isl::union_set.
326 /// @param Option The name of the option.
327 isl::union_set getDimOptions(isl::ctx Ctx, const char *Option) {
328   isl::space Space(Ctx, 0, 1);
329   auto DimOption = isl::set::universe(Space);
330   auto Id = isl::id::alloc(Ctx, Option, nullptr);
331   DimOption = DimOption.set_tuple_id(Id);
332   return isl::union_set(DimOption);
333 }
334 } // namespace
335 
336 /// Create an isl::union_set, which describes the option of the form
337 /// [isolate[] -> unroll[x]].
338 ///
339 /// @param Ctx An isl::ctx, which is used to create the isl::union_set.
340 static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
341   isl::space Space = isl::space(Ctx, 0, 0, 1);
342   isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
343   isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
344   isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
345   UnrollIsolatedSetOption =
346       UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
347   UnrollIsolatedSetOption =
348       UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
349   return UnrollIsolatedSetOption.wrap();
350 }
351 
352 /// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
353 ///
354 /// @param Set         A set, which should be modified.
355 /// @param VectorWidth A parameter, which determines the constraint.
356 static isl::set addExtentConstraints(isl::set Set, int VectorWidth) {
357   unsigned Dims = Set.dim(isl::dim::set);
358   isl::space Space = Set.get_space();
359   isl::local_space LocalSpace = isl::local_space(Space);
360   isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
361   ExtConstr = ExtConstr.set_constant_si(0);
362   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1);
363   Set = Set.add_constraint(ExtConstr);
364   ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
365   ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1);
366   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1);
367   return Set.add_constraint(ExtConstr);
368 }
369 
/// Compute the prefixes of the schedule range whose innermost dimension spans
/// a full vector tile.
///
/// Starting from @p ScheduleRange, drop the constraints on the innermost
/// dimension to obtain all candidate prefixes, then subtract those prefixes
/// for which some point of the full [0, VectorWidth) extent lies outside
/// @p ScheduleRange (i.e. prefixes that would only yield a partial tile).
///
/// @param ScheduleRange The range of a prefix schedule relation.
/// @param VectorWidth   The number of iterations in a full vector tile.
/// @return              The set of prefixes (innermost dimension projected
///                      out) that correspond to full tiles.
isl::set getPartialTilePrefixes(isl::set ScheduleRange, int VectorWidth) {
  unsigned Dims = ScheduleRange.dim(isl::dim::set);
  // All prefixes, regardless of how far the innermost dimension extends.
  isl::set LoopPrefixes =
      ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1);
  // Constrain the innermost dimension to the full tile extent [0, VW - 1].
  auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth);
  // Points of the full extent not contained in the schedule range mark
  // prefixes that cannot form a full tile.
  isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange);
  BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1);
  LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1);
  return LoopPrefixes.subtract(BadPrefixes);
}
380 
/// Isolate full vector tiles from partial ones on the given band node.
///
/// Descends two levels below @p Node to the point band created by tiling,
/// computes the set of prefix schedule values whose innermost tile is full
/// (see getPartialTilePrefixes), and attaches "isolate" plus "atomic" AST
/// build options to the original band so the AST generator separates full
/// from partial tiles.
///
/// @param Node        A band node (asserted below).
/// @param VectorWidth The full-tile extent of the innermost dimension.
isl::schedule_node
ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node,
                                               int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Step down to the grandchild to obtain its prefix schedule; the options
  // are set on the original node, so we step back up afterwards.
  Node = Node.child(0).child(0);
  isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation();
  isl::map ScheduleRelation = isl::map::from_union_map(SchedRelUMap);
  isl::set ScheduleRange = ScheduleRelation.range();
  isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
  auto AtomicOption = getDimOptions(IsolateDomain.get_ctx(), "atomic");
  isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1);
  Node = Node.parent().parent();
  isl::union_set Options = IsolateOption.unite(AtomicOption);
  Node = Node.band_set_ast_build_options(Options);
  return Node;
}
397 
/// Strip-mine a band dimension to prepare it for vectorization.
///
/// Splits the band so that @p DimToVectorize stands alone, tiles it by
/// @p VectorWidth, isolates full from partial tiles, sinks the resulting
/// point band to the innermost position, and wraps it in a "SIMD" mark node
/// for the backend to recognize.
///
/// @param Node           The band node containing the dimension to vectorize.
/// @param DimToVectorize Index of the band member to strip-mine.
/// @param VectorWidth    The strip-mine (vector) width.
isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand(
    isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto ScheduleDimensions = Space.dim(isl::dim::set);
  assert(DimToVectorize < ScheduleDimensions);

  // Split off the dimensions before DimToVectorize and descend to the band
  // that starts with it.
  if (DimToVectorize > 0) {
    Node = isl::manage(
        isl_schedule_node_band_split(Node.release(), DimToVectorize));
    Node = Node.child(0);
  }
  // Split off any dimensions after DimToVectorize, leaving a 1-D band.
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1));
  Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Sizes = isl::multi_val::zero(Space);
  Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth));
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = Node.child(0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = Node.band_set_ast_build_options(
      isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }"));
  // Push the point band innermost and position on the node to be marked.
  Node = isl::manage(isl_schedule_node_band_sink(Node.release()));
  Node = Node.child(0);
  if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf)
    Node = Node.parent();
  auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr);
  PrevectOpts++;
  return Node.insert_mark(LoopMarker);
}
432 
/// Tile a band node and surround the result with descriptive mark nodes.
///
/// Tiles @p Node with one size per band dimension, taken from @p TileSizes
/// and padded with @p DefaultTileSize where the list is too short. The tile
/// band is preceded by an "<Identifier> - Tiles" mark and the point band by
/// an "<Identifier> - Points" mark.
///
/// @param Node            The band node to tile.
/// @param Identifier      Human-readable prefix for the mark nodes.
/// @param TileSizes       Per-dimension tile sizes.
/// @param DefaultTileSize Fallback size for uncovered dimensions.
/// @return                The point band (child of the "Points" mark).
isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node,
                                                   const char *Identifier,
                                                   ArrayRef<int> TileSizes,
                                                   int DefaultTileSize) {
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);
  auto Sizes = isl::multi_val::zero(Space);
  std::string IdentifierString(Identifier);
  // Assemble one tile size per band dimension, padding with the default.
  for (unsigned i = 0; i < Dims; i++) {
    auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
    Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize));
  }
  auto TileLoopMarkerStr = IdentifierString + " - Tiles";
  auto TileLoopMarker =
      isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr, nullptr);
  Node = Node.insert_mark(TileLoopMarker);
  Node = Node.child(0);
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = Node.child(0);
  auto PointLoopMarkerStr = IdentifierString + " - Points";
  auto PointLoopMarker =
      isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr, nullptr);
  Node = Node.insert_mark(PointLoopMarker);
  return Node.child(0);
}
459 
460 isl::schedule_node ScheduleTreeOptimizer::applyRegisterTiling(
461     isl::schedule_node Node, ArrayRef<int> TileSizes, int DefaultTileSize) {
462   Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
463   auto Ctx = Node.get_ctx();
464   return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}"));
465 }
466 
467 static bool isSimpleInnermostBand(const isl::schedule_node &Node) {
468   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
469   assert(isl_schedule_node_n_children(Node.get()) == 1);
470 
471   auto ChildType = isl_schedule_node_get_type(Node.child(0).get());
472 
473   if (ChildType == isl_schedule_node_leaf)
474     return true;
475 
476   if (ChildType != isl_schedule_node_sequence)
477     return false;
478 
479   auto Sequence = Node.child(0);
480 
481   for (int c = 0, nc = isl_schedule_node_n_children(Sequence.get()); c < nc;
482        ++c) {
483     auto Child = Sequence.child(c);
484     if (isl_schedule_node_get_type(Child.get()) != isl_schedule_node_filter)
485       return false;
486     if (isl_schedule_node_get_type(Child.child(0).get()) !=
487         isl_schedule_node_leaf)
488       return false;
489   }
490   return true;
491 }
492 
493 bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) {
494   if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band)
495     return false;
496 
497   if (isl_schedule_node_n_children(Node.get()) != 1)
498     return false;
499 
500   if (!isl_schedule_node_band_get_permutable(Node.get()))
501     return false;
502 
503   auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
504   auto Dims = Space.dim(isl::dim::set);
505 
506   if (Dims <= 1)
507     return false;
508 
509   return isSimpleInnermostBand(Node);
510 }
511 
/// Apply the standard (non-pattern-matching) band optimizations.
///
/// Depending on the command-line flags, applies first-level tiling,
/// second-level tiling, and register tiling to @p Node, and finally
/// strip-mines the innermost coincident band member for prevectorization
/// (unless vectorization is disabled).
///
/// @param Node The band node to optimize.
/// @param User Unused callback context.
__isl_give isl::schedule_node
ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) {
  if (FirstLevelTiling) {
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
                    FirstLevelDefaultTileSize);
    FirstLevelTileOpts++;
  }

  if (SecondLevelTiling) {
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
                    SecondLevelDefaultTileSize);
    SecondLevelTileOpts++;
  }

  if (RegisterTiling) {
    Node =
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
    RegisterTileOpts++;
  }

  if (PollyVectorizerChoice == VECTORIZER_NONE)
    return Node;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  // Scan from the innermost member outwards and prevectorize the first
  // coincident one found.
  for (int i = Dims - 1; i >= 0; i--)
    if (Node.band_member_get_coincident(i)) {
      Node = prevectSchedBand(Node, i, PrevectorWidth);
      break;
    }

  return Node;
}
546 
/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos,
                           unsigned SrcPos) {
  assert(DstPos < Map.dim(DimType) && SrcPos < Map.dim(DimType));
  if (DstPos == SrcPos)
    return Map;
  // Save the tuple ids of both sides; they are restored at the end because
  // the move_dims calls below can discard them. NOTE(review): restoration
  // assumes move_dims drops the ids — confirm against the isl documentation.
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  // Park both dimensions at the front of the opposite tuple (larger index
  // first so the smaller index stays valid), then reinsert them in swapped
  // order. The exact sequence of these four moves is what realizes the swap.
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  if (DimId)
    Map = Map.set_tuple_id(DimType, DimId);
  if (FreeDimId)
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}
581 
582 /// Check the form of the access relation.
583 ///
584 /// Check that the access relation @p AccMap has the form M[i][j], where i
585 /// is a @p FirstPos and j is a @p SecondPos.
586 ///
587 /// @param AccMap    The access relation to be checked.
588 /// @param FirstPos  The index of the input dimension that is mapped to
589 ///                  the first output dimension.
590 /// @param SecondPos The index of the input dimension that is mapped to the
591 ///                  second output dimension.
592 /// @return          True in case @p AccMap has the expected form and false,
593 ///                  otherwise.
594 static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
595                                int &SecondPos) {
596   isl::space Space = AccMap.get_space();
597   isl::map Universe = isl::map::universe(Space);
598 
599   if (Space.dim(isl::dim::out) != 2)
600     return false;
601 
602   // MatMul has the form:
603   // for (i = 0; i < N; i++)
604   //   for (j = 0; j < M; j++)
605   //     for (k = 0; k < P; k++)
606   //       C[i, j] += A[i, k] * B[k, j]
607   //
608   // Permutation of three outer loops: 3! = 6 possibilities.
609   int FirstDims[] = {0, 0, 1, 1, 2, 2};
610   int SecondDims[] = {1, 2, 2, 0, 0, 1};
611   for (int i = 0; i < 6; i += 1) {
612     auto PossibleMatMul =
613         Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
614             .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);
615 
616     AccMap = AccMap.intersect_domain(Domain);
617     PossibleMatMul = PossibleMatMul.intersect_domain(Domain);
618 
619     // If AccMap spans entire domain (Non-partial write),
620     // compute FirstPos and SecondPos.
621     // If AccMap != PossibleMatMul here (the two maps have been gisted at
622     // this point), it means that the writes are not complete, or in other
623     // words, it is a Partial write and Partial writes must be rejected.
624     if (AccMap.is_equal(PossibleMatMul)) {
625       if (FirstPos != -1 && FirstPos != FirstDims[i])
626         continue;
627       FirstPos = FirstDims[i];
628       if (SecondPos != -1 && SecondPos != SecondDims[i])
629         continue;
630       SecondPos = SecondDims[i];
631       return true;
632     }
633   }
634 
635   return false;
636 }
637 
638 /// Does the memory access represent a non-scalar operand of the matrix
639 /// multiplication.
640 ///
641 /// Check that the memory access @p MemAccess is the read access to a non-scalar
642 /// operand of the matrix multiplication or its result.
643 ///
644 /// @param MemAccess The memory access to be checked.
645 /// @param MMI       Parameters of the matrix multiplication operands.
646 /// @return          True in case the memory access represents the read access
647 ///                  to a non-scalar operand of the matrix multiplication and
648 ///                  false, otherwise.
649 static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
650                                         MatMulInfoTy &MMI) {
651   if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
652     return false;
653   auto AccMap = MemAccess->getLatestAccessRelation();
654   isl::set StmtDomain = MemAccess->getStatement()->getDomain();
655   if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
656     MMI.ReadFromC = MemAccess;
657     return true;
658   }
659   if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
660     MMI.A = MemAccess;
661     return true;
662   }
663   if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
664     MMI.B = MemAccess;
665     return true;
666   }
667   return false;
668 }
669 
670 /// Check accesses to operands of the matrix multiplication.
671 ///
672 /// Check that accesses of the SCoP statement, which corresponds to
673 /// the partial schedule @p PartialSchedule, are scalar in terms of loops
674 /// containing the matrix multiplication, in case they do not represent
675 /// accesses to the non-scalar operands of the matrix multiplication or
676 /// its result.
677 ///
678 /// @param  PartialSchedule The partial schedule of the SCoP statement.
679 /// @param  MMI             Parameters of the matrix multiplication operands.
680 /// @return                 True in case the corresponding SCoP statement
681 ///                         represents matrix multiplication and false,
682 ///                         otherwise.
683 static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
684                                     MatMulInfoTy &MMI) {
685   auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
686   auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
687   unsigned OutDimNum = PartialSchedule.dim(isl::dim::out);
688   assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
689                           "and, consequently, the corresponding scheduling "
690                           "functions have at least three dimensions.");
691   auto MapI =
692       permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
693   auto MapJ =
694       permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
695   auto MapK =
696       permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);
697 
698   auto Accesses = getAccessesInOrder(*Stmt);
699   for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) {
700     auto *MemAccessPtr = *MemA;
701     if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC &&
702         !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
703         !(MemAccessPtr->isStrideZero(MapI)) &&
704         MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK))
705       return false;
706   }
707   return true;
708 }
709 
/// Check for dependencies corresponding to the matrix multiplication.
///
/// Check that there is only true dependence of the form
/// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
/// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
/// to the dependency produced by the matrix multiplication.
///
/// @param  Schedule The schedule of the SCoP statement.
/// @param  D The SCoP dependencies.
/// @param  Pos The parameter to describe an acceptable true dependence.
///             In case it has a negative value, try to determine its
///             acceptable value.
/// @return True in case dependencies correspond to the matrix multiplication
///         and false, otherwise.
static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
                                  int &Pos) {
  // Collect the flow (RAW) dependences and fold in the reduction dependences,
  // if any, since the accumulation into C may be modeled as a reduction.
  isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW);
  isl::union_map Red = D->getDependences(Dependences::TYPE_RED);
  if (Red)
    Dep = Dep.unite(Red);
  // Restrict to self-dependences of the statement scheduled by Schedule and
  // compute the dependence distance vectors.
  auto DomainSpace = Schedule.get_space().domain();
  auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
  auto Deltas = Dep.extract_map(Space).deltas();
  int DeltasDimNum = Deltas.dim(isl::dim::set);
  for (int i = 0; i < DeltasDimNum; i++) {
    auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
    // If Pos was not provided by the caller (negative), the first dimension
    // with a constant distance of one becomes the candidate dimension k.
    Pos = Pos < 0 && Val.is_one() ? i : Pos;
    // Every dimension other than Pos must have a constant distance of zero.
    // A NaN result means the distance is not fixed to a single constant.
    if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one())))
      return false;
  }
  // Reject statements with no dependences at all or without a dimension that
  // carries the required distance-one dependence.
  if (DeltasDimNum == 0 || Pos < 0)
    return false;
  return true;
}
744 
/// Check if the SCoP statement could probably be optimized with analytical
/// modeling.
///
/// containsMatrMult tries to determine whether the following conditions
/// are true:
/// 1. The last memory access modeling an array, MA1, represents writing to
///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
///    under consideration.
/// 2. There is only one loop-carried true dependency, and it has the
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
///    loop-carried or anti dependencies.
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
///    and all memory accesses of the SCoP that are different from MA1, MA2,
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
///    of loops i1, i2 and i3.
///
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
///        to check.
/// @param D   The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands; filled in on
///            success (WriteToC, A, B, ReadFromC, and the k dimension).
/// @return True in case the statement matches the pattern, false otherwise.
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
                             MatMulInfoTy &MMI) {
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  // A statement with at most one access cannot be a matrix multiplication.
  if (Stmt->size() <= 1)
    return false;

  // Condition 1: walking the accesses back to front, the last array access
  // must be a write of the form S(...) -> M(i1, i2); it is recorded as the
  // write to the result matrix C.
  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isLatestArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
      return false;
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // Condition 2: only the distance-one dependence in the reduction dimension
  // k may be present. This also determines MMI.k.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // Condition 3: the remaining accesses must read A, B, and C, or be scalar
  // with respect to the i, j, and k loops.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  // All three read operands must have been identified.
  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}
799 
800 /// Permute two dimensions of the band node.
801 ///
802 /// Permute FirstDim and SecondDim dimensions of the Node.
803 ///
804 /// @param Node The band node to be modified.
805 /// @param FirstDim The first dimension to be permuted.
806 /// @param SecondDim The second dimension to be permuted.
807 static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
808                                                     unsigned FirstDim,
809                                                     unsigned SecondDim) {
810   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band &&
811          isl_schedule_node_band_n_member(Node.get()) >
812              std::max(FirstDim, SecondDim));
813   auto PartialSchedule =
814       isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get()));
815   auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim);
816   auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim);
817   PartialSchedule =
818       PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
819   PartialSchedule =
820       PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
821   Node = isl::manage(isl_schedule_node_delete(Node.release()));
822   return Node.insert_partial_schedule(PartialSchedule);
823 }
824 
825 isl::schedule_node ScheduleTreeOptimizer::createMicroKernel(
826     isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) {
827   Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
828                              1);
829   Node = Node.parent().parent();
830   return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
831 }
832 
833 isl::schedule_node ScheduleTreeOptimizer::createMacroKernel(
834     isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) {
835   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
836   if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
837       MacroKernelParams.Kc == 1)
838     return Node;
839   int DimOutNum = isl_schedule_node_band_n_member(Node.get());
840   std::vector<int> TileSizes(DimOutNum, 1);
841   TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
842   TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
843   TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
844   Node = tileNode(Node, "1st level tiling", TileSizes, 1);
845   Node = Node.parent().parent();
846   Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
847   Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
848 
849   // Mark the outermost loop as parallelizable.
850   Node = Node.band_member_set_coincident(0, true);
851 
852   return Node.child(0).child(0);
853 }
854 
855 /// Get the size of the widest type of the matrix multiplication operands
856 /// in bytes, including alignment padding.
857 ///
858 /// @param MMI Parameters of the matrix multiplication operands.
859 /// @return The size of the widest type of the matrix multiplication operands
860 ///         in bytes, including alignment padding.
861 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
862   auto *S = MMI.A->getStatement()->getParent();
863   auto &DL = S->getFunction().getParent()->getDataLayout();
864   auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
865   auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
866   auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
867   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
868 }
869 
870 /// Get the size of the widest type of the matrix multiplication operands
871 /// in bits.
872 ///
873 /// @param MMI Parameters of the matrix multiplication operands.
874 /// @return The size of the widest type of the matrix multiplication operands
875 ///         in bits.
876 static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
877   auto *S = MMI.A->getStatement()->getParent();
878   auto &DL = S->getFunction().getParent()->getDataLayout();
879   auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
880   auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
881   auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
882   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
883 }
884 
885 /// Get parameters of the BLIS micro kernel.
886 ///
887 /// We choose the Mr and Nr parameters of the micro kernel to be large enough
888 /// such that no stalls caused by the combination of latencies and dependencies
889 /// are introduced during the updates of the resulting matrix of the matrix
890 /// multiplication. However, they should also be as small as possible to
891 /// release more registers for entries of multiplied matrices.
892 ///
893 /// @param TTI Target Transform Info.
894 /// @param MMI Parameters of the matrix multiplication operands.
895 /// @return The structure of type MicroKernelParamsTy.
896 /// @see MicroKernelParamsTy
897 static struct MicroKernelParamsTy
898 getMicroKernelParams(const TargetTransformInfo *TTI, MatMulInfoTy MMI) {
899   assert(TTI && "The target transform info should be provided.");
900 
901   // Nvec - Number of double-precision floating-point numbers that can be hold
902   // by a vector register. Use 2 by default.
903   long RegisterBitwidth = VectorRegisterBitwidth;
904 
905   if (RegisterBitwidth == -1)
906     RegisterBitwidth = TTI->getRegisterBitWidth(true);
907   auto ElementSize = getMatMulTypeSize(MMI);
908   assert(ElementSize > 0 && "The element size of the matrix multiplication "
909                             "operands should be greater than zero.");
910   auto Nvec = RegisterBitwidth / ElementSize;
911   if (Nvec == 0)
912     Nvec = 2;
913   int Nr =
914       ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
915   int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
916   return {Mr, Nr};
917 }
918 
919 namespace {
920 /// Determine parameters of the target cache.
921 ///
922 /// @param TTI Target Transform Info.
923 void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
924   auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
925   auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
926   if (FirstCacheLevelSize == -1) {
927     if (TTI->getCacheSize(L1DCache).hasValue())
928       FirstCacheLevelSize = TTI->getCacheSize(L1DCache).getValue();
929     else
930       FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize);
931   }
932   if (SecondCacheLevelSize == -1) {
933     if (TTI->getCacheSize(L2DCache).hasValue())
934       SecondCacheLevelSize = TTI->getCacheSize(L2DCache).getValue();
935     else
936       SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize);
937   }
938   if (FirstCacheLevelAssociativity == -1) {
939     if (TTI->getCacheAssociativity(L1DCache).hasValue())
940       FirstCacheLevelAssociativity =
941           TTI->getCacheAssociativity(L1DCache).getValue();
942     else
943       FirstCacheLevelAssociativity =
944           static_cast<int>(FirstCacheLevelDefaultAssociativity);
945   }
946   if (SecondCacheLevelAssociativity == -1) {
947     if (TTI->getCacheAssociativity(L2DCache).hasValue())
948       SecondCacheLevelAssociativity =
949           TTI->getCacheAssociativity(L2DCache).getValue();
950     else
951       SecondCacheLevelAssociativity =
952           static_cast<int>(SecondCacheLevelDefaultAssociativity);
953   }
954 }
955 } // namespace
956 
/// Get parameters of the BLIS macro kernel.
///
/// During the computation of matrix multiplication, blocks of partitioned
/// matrices are mapped to different layers of the memory hierarchy.
/// To optimize data reuse, blocks should be ideally kept in cache between
/// iterations. Since parameters of the macro kernel determine sizes of these
/// blocks, there are upper and lower bounds on these parameters.
///
/// @param TTI Target Transform Info.
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MacroKernelParamsTy. {1, 1, 1} is returned
///         whenever the analytical model cannot be applied; callers treat
///         that value as "no macro-kernel tiling".
/// @see MacroKernelParamsTy
/// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
                     const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  getTargetCacheParameters(TTI);
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car - presumably the number of L1 cache sets reserved for a block of A
  // per the analytical model above; TODO confirm against the paper.
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is floor to int.
  // On Mac OS, division by 0 does not raise a signal. This causes negative
  // tile sizes to be computed. Prevent division by Cac==0 by early returning
  // if this happens.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  // Kc is derived from the share of the first-level cache available to A;
  // Mc from the share of the second-level cache (Cac) it occupies.
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  // Nc is chosen as a fixed multiple of the micro-kernel width Nr.
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be  greater than zero");
  return {Mc, Nc, Kc};
}
1015 
1016 /// Create an access relation that is specific to
1017 ///        the matrix multiplication pattern.
1018 ///
1019 /// Create an access relation of the following form:
1020 /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
1021 /// where I is @p FirstDim, J is @p SecondDim.
1022 ///
1023 /// It can be used, for example, to create relations that helps to consequently
1024 /// access elements of operands of a matrix multiplication after creation of
1025 /// the BLIS micro and macro kernels.
1026 ///
1027 /// @see ScheduleTreeOptimizer::createMicroKernel
1028 /// @see ScheduleTreeOptimizer::createMacroKernel
1029 ///
1030 /// Subsequently, the described access relation is applied to the range of
1031 /// @p MapOldIndVar, that is used to map original induction variables to
1032 /// the ones, which are produced by schedule transformations. It helps to
1033 /// define relations using a new space and, at the same time, keep them
1034 /// in the original one.
1035 ///
1036 /// @param MapOldIndVar The relation, which maps original induction variables
1037 ///                     to the ones, which are produced by schedule
1038 ///                     transformations.
1039 /// @param FirstDim, SecondDim The input dimensions that are used to define
1040 ///        the specified access relation.
1041 /// @return The specified access relation.
1042 isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
1043                          unsigned SecondDim) {
1044   auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3);
1045   auto AccessRel = isl::map::universe(AccessRelSpace);
1046   AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
1047   AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
1048   AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
1049   return MapOldIndVar.apply_range(AccessRel);
1050 }
1051 
1052 isl::schedule_node createExtensionNode(isl::schedule_node Node,
1053                                        isl::map ExtensionMap) {
1054   auto Extension = isl::union_map(ExtensionMap);
1055   auto NewNode = isl::schedule_node::from_extension(Extension);
1056   return Node.graft_before(NewNode);
1057 }
1058 
/// Apply the packing transformation.
///
/// The packing transformation can be described as a data-layout
/// transformation that requires to introduce a new array, copy data
/// to the array, and change memory access locations to reference the array.
/// It can be used to ensure that elements of the new array are read in-stride
/// access, aligned to cache lines boundaries, and preloaded into certain cache
/// levels.
///
/// As an example let us consider the packing of the array A that would help
/// to read its elements with in-stride access. An access to the array A
/// is represented by an access relation that has the form
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
/// k mod Kc, j mod Nr, i mod Mr].
///
/// To ensure that elements of the array A are read in-stride access, we add
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
/// Scop::createScopArrayInfo, change the access relation
/// S[i, j, k] -> A[i, k] to
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
/// the copy statement created by Scop::addScopStmt.
///
/// @param Node The schedule node to be optimized.
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
///                                 to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static isl::schedule_node
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
                                 MicroKernelParamsTy MicroParams,
                                 MacroKernelParamsTy MacroParams,
                                 MatMulInfoTy &MMI) {
  auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // Walk up six levels from the micro-kernel band and split the band so the
  // copy statement can be grafted at the right depth.
  Node = Node.parent().parent().parent().parent().parent().parent();
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0);
  // Dimensions 3 and 7 of MapOldIndVar select the tile/point coordinates used
  // to index Packed_B (together with dimension 5 added by getMatMulAccRel).
  auto AccRel = getMatMulAccRel(MapOldIndVar, 3, 7);
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  // Packed_B has the shape [Nc/Nr][Kc][Nr].
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  // Redirect the read of B to the packed array; keep the old relation for the
  // copy statement that fills Packed_B.
  auto OldAcc = MMI.B->getLatestAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  // Build the extension map that instantiates the copy statement: keep only
  // the two outermost new dimensions and invert the mapping.
  auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2,
                                         MapOldIndVar.dim(isl::dim::out) - 2);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
  auto Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto DomainId = Domain.get_tuple_id();
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getLatestAccessRelation(), Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = Node.child(0);
  // Dimensions 4 and 6 of MapOldIndVar select the coordinates for Packed_A.
  AccRel = getMatMulAccRel(MapOldIndVar, 4, 6);
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  // Packed_A has the shape [Mc/Mr][Kc][Mr].
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  OldAcc = MMI.A->getLatestAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  // The copy of A is placed one level deeper: keep three new dimensions.
  ExtMap = MapOldIndVar.project_out(isl::dim::out, 3,
                                    MapOldIndVar.dim(isl::dim::out) - 3);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.A->getLatestAccessRelation(), Domain);

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  // Descend back to the position of the micro-kernel band.
  return Node.child(0).child(0).child(0).child(0).child(0);
}
1156 
1157 /// Get a relation mapping induction variables produced by schedule
1158 /// transformations to the original ones.
1159 ///
1160 /// @param Node The schedule node produced as the result of creation
1161 ///        of the BLIS kernels.
1162 /// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
1163 ///                                             to be taken into account.
1164 /// @return  The relation mapping original induction variables to the ones
1165 ///          produced by schedule transformation.
1166 /// @see ScheduleTreeOptimizer::createMicroKernel
1167 /// @see ScheduleTreeOptimizer::createMacroKernel
1168 /// @see getMacroKernelParams
1169 isl::map
1170 getInductionVariablesSubstitution(isl::schedule_node Node,
1171                                   MicroKernelParamsTy MicroKernelParams,
1172                                   MacroKernelParamsTy MacroKernelParams) {
1173   auto Child = Node.child(0);
1174   auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
1175   auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
1176   if (MapOldIndVar.dim(isl::dim::out) > 9)
1177     return MapOldIndVar.project_out(isl::dim::out, 0,
1178                                     MapOldIndVar.dim(isl::dim::out) - 9);
1179   return MapOldIndVar;
1180 }
1181 
/// Isolate a set of partial tile prefixes and unroll the isolated part.
///
/// The set should ensure that it contains only partial tile prefixes that have
/// exactly Mr x Nr iterations of the two innermost loops produced by
/// the optimization of the matrix multiplication. Mr and Nr are parameters of
/// the micro-kernel.
///
/// In case of parametric bounds, this helps to auto-vectorize the unrolled
/// innermost loops, using the SLP vectorizer.
///
/// @param Node              The schedule node to be modified.
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @return The modified isl_schedule_node.
static isl::schedule_node
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
                                 struct MicroKernelParamsTy MicroKernelParams) {
  // Compute the set of schedule prefixes of the child band.
  isl::schedule_node Child = Node.get_child(0);
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
  unsigned Dims = Prefix.dim(isl::dim::set);
  // Drop the innermost dimension and restrict to prefixes whose two innermost
  // loops have full Nr and Mr trip counts, respectively.
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);

  // On the current band: isolate the full tiles and request unrolling of the
  // isolated part.
  isl::union_set IsolateOption =
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
  isl::ctx Ctx = Node.get_ctx();
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
  Node = Node.band_set_ast_build_options(Options);
  // On the band three levels up: isolate the same prefixes and separate the
  // remaining (partial-tile) iterations.
  Node = Node.parent().parent().parent();
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
  Node = Node.band_set_ast_build_options(Options);
  // Return to the original position in the tree.
  Node = Node.child(0).child(0).child(0);
  return Node;
}
1220 
1221 /// Mark @p BasePtr with "Inter iteration alias-free" mark node.
1222 ///
1223 /// @param Node The child of the mark node to be inserted.
1224 /// @param BasePtr The pointer to be marked.
1225 /// @return The modified isl_schedule_node.
1226 static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node,
1227                                                       Value *BasePtr) {
1228   if (!BasePtr)
1229     return Node;
1230 
1231   auto Id =
1232       isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr);
1233   return Node.insert_mark(Id).child(0);
1234 }
1235 
1236 /// Insert "Loop Vectorizer Disabled" mark node.
1237 ///
1238 /// @param Node The child of the mark node to be inserted.
1239 /// @return The modified isl_schedule_node.
1240 static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
1241   auto Id = isl::id::alloc(Node.get_ctx(), "Loop Vectorizer Disabled", nullptr);
1242   return Node.insert_mark(Id).child(0);
1243 }
1244 
/// Restore the initial ordering of dimensions of the band node
///
/// In case the band node represents all the dimensions of the iteration
/// domain, recreate the band node to restore the initial ordering of the
/// dimensions.
///
/// @param Node The band node to be modified.
/// @return The modified schedule node.
static isl::schedule_node
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Only handle an innermost band (its child is a leaf).
  if (isl_schedule_node_get_type(Node.child(0).get()) != isl_schedule_node_leaf)
    return Node;
  auto Domain = Node.get_universe_domain();
  assert(isl_union_set_n_set(Domain.get()) == 1);
  // The band must start at the root of the schedule and cover every dimension
  // of the iteration domain; otherwise leave it untouched.
  if (Node.get_schedule_depth() != 0 ||
      (isl::set(Domain).dim(isl::dim::set) !=
       isl_schedule_node_band_n_member(Node.get())))
    return Node;
  // Replace the band by a new one whose partial schedule is the identity on
  // the domain, i.e. the original loop order.
  Node = isl::manage(isl_schedule_node_delete(Node.copy()));
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  // Drop the tuple id so the schedule is anonymous, as band schedules are.
  PartialScheduleMultiPwAff =
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
}
1272 
/// Optimize the detected matrix-multiplication band.
///
/// Permute the loops into the canonical (i, j, k) order, create the BLIS
/// macro- and micro-kernels, and, when the macro-kernel tiling is non-trivial,
/// apply the packing (data-layout) transformation and isolate/unroll the
/// innermost loops.
///
/// @param Node The band node that matched the matmul pattern.
/// @param TTI  Target Transform Info used to derive kernel parameters.
/// @param MMI  Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
isl::schedule_node
ScheduleTreeOptimizer::optimizeMatMulPattern(isl::schedule_node Node,
                                             const TargetTransformInfo *TTI,
                                             MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  Node = markInterIterationAliasFree(
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(Node);
  // Move loop i to position DimOutNum - 3; NewJ/NewK track where j and k end
  // up after each permutation.
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  // A trivial macro-kernel tiling means the analytical model did not apply;
  // skip the packing transformation in that case.
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
                                                        MacroKernelParams);
  if (!MapOldIndVar)
    return Node;
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}
1307 
1308 bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node,
1309                                               const Dependences *D,
1310                                               MatMulInfoTy &MMI) {
1311   auto PartialSchedule = isl::manage(
1312       isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
1313   Node = Node.child(0);
1314   auto LeafType = isl_schedule_node_get_type(Node.get());
1315   Node = Node.parent();
1316   if (LeafType != isl_schedule_node_leaf ||
1317       isl_schedule_node_band_n_member(Node.get()) < 3 ||
1318       Node.get_schedule_depth() != 0 ||
1319       isl_union_map_n_map(PartialSchedule.get()) != 1)
1320     return false;
1321   auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
1322   if (containsMatrMult(NewPartialSchedule, D, MMI))
1323     return true;
1324   return false;
1325 }
1326 
1327 __isl_give isl_schedule_node *
1328 ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
1329                                     void *User) {
1330   if (!isTileableBandNode(isl::manage_copy(Node)))
1331     return Node;
1332 
1333   const OptimizerAdditionalInfoTy *OAI =
1334       static_cast<const OptimizerAdditionalInfoTy *>(User);
1335 
1336   MatMulInfoTy MMI;
1337   if (PMBasedOpts && User &&
1338       isMatrMultPattern(isl::manage_copy(Node), OAI->D, MMI)) {
1339     LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
1340     MatMulOpts++;
1341     return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release();
1342   }
1343 
1344   return standardBandOpts(isl::manage(Node), User).release();
1345 }
1346 
1347 isl::schedule
1348 ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule,
1349                                         const OptimizerAdditionalInfoTy *OAI) {
1350   auto Root = Schedule.get_root();
1351   Root = optimizeScheduleNode(Root, OAI);
1352   return Root.get_schedule();
1353 }
1354 
1355 isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode(
1356     isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) {
1357   Node = isl::manage(isl_schedule_node_map_descendant_bottom_up(
1358       Node.release(), optimizeBand,
1359       const_cast<void *>(static_cast<const void *>(OAI))));
1360   return Node;
1361 }
1362 
1363 bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S,
1364                                                  isl::schedule NewSchedule) {
1365   // To understand if the schedule has been optimized we check if the schedule
1366   // has changed at all.
1367   // TODO: We can improve this by tracking if any necessarily beneficial
1368   // transformations have been performed. This can e.g. be tiling, loop
1369   // interchange, or ...) We can track this either at the place where the
1370   // transformation has been performed or, in case of automatic ILP based
1371   // optimizations, by comparing (yet to be defined) performance metrics
1372   // before/after the scheduling optimizer
1373   // (e.g., #stride-one accesses)
1374   auto NewScheduleMap = NewSchedule.get_map();
1375   auto OldSchedule = S.getSchedule();
1376   assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1377                         "that make Scop::getSchedule() return nullptr.");
1378   bool changed = !OldSchedule.is_equal(NewScheduleMap);
1379   return changed;
1380 }
1381 
namespace {

/// Legacy pass-manager wrapper that computes an optimized schedule for each
/// SCoP (isl rescheduling followed by tiling / pattern-based
/// post-transformations) and installs it on the Scop.
class IslScheduleOptimizer : public ScopPass {
public:
  static char ID;

  explicit IslScheduleOptimizer() : ScopPass(ID) {}

  // isl_schedule_free accepts a null pointer, so no check is needed here.
  ~IslScheduleOptimizer() override { isl_schedule_free(LastSchedule); }

  /// Optimize the schedule of the SCoP @p S.
  bool runOnScop(Scop &S) override;

  /// Print the new schedule for the SCoP @p S.
  void printScop(raw_ostream &OS, Scop &S) const override;

  /// Register all analyses and transformation required.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Release the internal memory.
  void releaseMemory() override {
    isl_schedule_free(LastSchedule);
    LastSchedule = nullptr;
  }

private:
  // Owned copy of the last schedule computed by runOnScop; printed by
  // printScop and released in releaseMemory() / the destructor.
  isl_schedule *LastSchedule = nullptr;
};
} // namespace
1411 
// Unique pass identifier; its address is used by the legacy pass manager.
char IslScheduleOptimizer::ID = 0;
1413 
1414 /// Collect statistics for the schedule tree.
1415 ///
1416 /// @param Schedule The schedule tree to analyze. If not a schedule tree it is
1417 /// ignored.
1418 /// @param Version  The version of the schedule tree that is analyzed.
1419 ///                 0 for the original schedule tree before any transformation.
1420 ///                 1 for the schedule tree after isl's rescheduling.
1421 ///                 2 for the schedule tree after optimizations are applied
1422 ///                 (tiling, pattern matching)
1423 static void walkScheduleTreeForStatistics(isl::schedule Schedule, int Version) {
1424   auto Root = Schedule.get_root();
1425   if (!Root)
1426     return;
1427 
1428   isl_schedule_node_foreach_descendant_top_down(
1429       Root.get(),
1430       [](__isl_keep isl_schedule_node *nodeptr, void *user) -> isl_bool {
1431         isl::schedule_node Node = isl::manage_copy(nodeptr);
1432         int Version = *static_cast<int *>(user);
1433 
1434         switch (isl_schedule_node_get_type(Node.get())) {
1435         case isl_schedule_node_band: {
1436           NumBands[Version]++;
1437           if (isl_schedule_node_band_get_permutable(Node.get()) ==
1438               isl_bool_true)
1439             NumPermutable[Version]++;
1440 
1441           int CountMembers = isl_schedule_node_band_n_member(Node.get());
1442           NumBandMembers[Version] += CountMembers;
1443           for (int i = 0; i < CountMembers; i += 1) {
1444             if (Node.band_member_get_coincident(i))
1445               NumCoincident[Version]++;
1446           }
1447           break;
1448         }
1449 
1450         case isl_schedule_node_filter:
1451           NumFilters[Version]++;
1452           break;
1453 
1454         case isl_schedule_node_extension:
1455           NumExtension[Version]++;
1456           break;
1457 
1458         default:
1459           break;
1460         }
1461 
1462         return isl_bool_true;
1463       },
1464       &Version);
1465 }
1466 
/// Compute and install an optimized schedule for @p S.
///
/// Runs the isl scheduler (configured from the pass's command-line options)
/// on the SCoP's domains and dependences, applies the post-scheduling
/// transformations, and replaces the SCoP's schedule tree when the result
/// differs from the original one. Always returns false: only the Scop
/// representation changes here, never the LLVM IR itself.
bool IslScheduleOptimizer::runOnScop(Scop &S) {
  // Skip SCoPs in case they're already optimised by PPCGCodeGeneration
  if (S.isToBeSkipped())
    return false;

  // Skip empty SCoPs but still allow code generation as it will delete the
  // loops present but not needed.
  if (S.getSize() == 0) {
    S.markAsOptimized();
    return false;
  }

  const Dependences &D =
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);

  // Dependences computed for a different isl context cannot be combined
  // with this Scop's sets and maps; bail out rather than mixing contexts.
  if (D.getSharedIslCtx() != S.getSharedIslCtx()) {
    LLVM_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
    return false;
  }

  if (!D.hasValidDependences())
    return false;

  // Drop any schedule kept from a previous SCoP before computing a new one.
  isl_schedule_free(LastSchedule);
  LastSchedule = nullptr;

  // Build input data.
  int ValidityKinds =
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  int ProximityKinds;

  // Translate the -polly-opt-optimize-only option into the set of
  // dependence kinds the scheduler should minimize distances for.
  if (OptimizeDeps == "all")
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  else if (OptimizeDeps == "raw")
    ProximityKinds = Dependences::TYPE_RAW;
  else {
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
           << " Falling back to optimizing all dependences.\n";
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  }

  isl::union_set Domain = S.getDomains();

  if (!Domain)
    return false;

  ScopsProcessed++;
  // Version 0: statistics for the schedule tree before any transformation.
  walkScheduleTreeForStatistics(S.getScheduleTree(), 0);

  isl::union_map Validity = D.getDependences(ValidityKinds);
  isl::union_map Proximity = D.getDependences(ProximityKinds);

  // Simplify the dependences by removing the constraints introduced by the
  // domains. This can speed up the scheduling time significantly, as large
  // constant coefficients will be removed from the dependences. The
  // introduction of some additional dependences reduces the possible
  // transformations, but in most cases, such transformation do not seem to be
  // interesting anyway. In some cases this option may stop the scheduler to
  // find any schedule.
  if (SimplifyDeps == "yes") {
    Validity = Validity.gist_domain(Domain);
    Validity = Validity.gist_range(Domain);
    Proximity = Proximity.gist_domain(Domain);
    Proximity = Proximity.gist_range(Domain);
  } else if (SimplifyDeps != "no") {
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
              "or 'no'. Falling back to default: 'yes'\n";
  }

  LLVM_DEBUG(dbgs() << "\n\nCompute schedule from: ");
  LLVM_DEBUG(dbgs() << "Domain := " << Domain << ";\n");
  LLVM_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
  LLVM_DEBUG(dbgs() << "Validity := " << Validity << ";\n");

  // Translate the string-valued scheduling options into the integer flags
  // the isl scheduler expects, warning on unknown values.
  unsigned IslSerializeSCCs;

  if (FusionStrategy == "max") {
    IslSerializeSCCs = 0;
  } else if (FusionStrategy == "min") {
    IslSerializeSCCs = 1;
  } else {
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
              "fusion.\n";
    IslSerializeSCCs = 0;
  }

  int IslMaximizeBands;

  if (MaximizeBandDepth == "yes") {
    IslMaximizeBands = 1;
  } else if (MaximizeBandDepth == "no") {
    IslMaximizeBands = 0;
  } else {
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
              " or 'no'. Falling back to default: 'yes'\n";
    IslMaximizeBands = 1;
  }

  int IslOuterCoincidence;

  if (OuterCoincidence == "yes") {
    IslOuterCoincidence = 1;
  } else if (OuterCoincidence == "no") {
    IslOuterCoincidence = 0;
  } else {
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
              "'yes' or 'no'. Falling back to default: 'no'\n";
    IslOuterCoincidence = 0;
  }

  isl_ctx *Ctx = S.getIslCtx().get();

  // Configure the isl scheduler; these options live on the isl context.
  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
  isl_options_set_tile_scale_tile_loops(Ctx, 0);

  // Let a failed scheduling attempt return a null schedule instead of
  // aborting; the previous error behavior is restored afterwards.
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);

  auto SC = isl::schedule_constraints::on_domain(Domain);
  SC = SC.set_proximity(Proximity);
  SC = SC.set_validity(Validity);
  SC = SC.set_coincidence(Validity);
  auto Schedule = SC.compute_schedule();
  isl_options_set_on_error(Ctx, OnErrorStatus);

  // Version 1: statistics for the tree produced by isl's rescheduling.
  walkScheduleTreeForStatistics(Schedule, 1);

  // In cases the scheduler is not able to optimize the code, we just do not
  // touch the schedule.
  if (!Schedule)
    return false;

  ScopsRescheduled++;

  LLVM_DEBUG({
    auto *P = isl_printer_to_str(Ctx);
    P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
    P = isl_printer_print_schedule(P, Schedule.get());
    auto *str = isl_printer_get_str(P);
    dbgs() << "NewScheduleTree: \n" << str << "\n";
    free(str);
    isl_printer_free(P);
  });

  // Apply the post-scheduling transformations (tiling, pattern matching).
  Function &F = S.getFunction();
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
  auto NewSchedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
  NewSchedule = hoistExtensionNodes(NewSchedule);
  // Version 2: statistics for the fully optimized tree.
  walkScheduleTreeForStatistics(NewSchedule, 2);

  if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule))
    return false;

  auto ScopStats = S.getStatistics();
  ScopsOptimized++;
  NumAffineLoopsOptimized += ScopStats.NumAffineLoops;
  NumBoxedLoopsOptimized += ScopStats.NumBoxedLoops;

  S.setScheduleTree(NewSchedule);
  S.markAsOptimized();

  if (OptimizedScops)
    errs() << S;

  // Only the polyhedral description changed, not the LLVM IR.
  return false;
}
1640 
1641 void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const {
1642   isl_printer *p;
1643   char *ScheduleStr;
1644 
1645   OS << "Calculated schedule:\n";
1646 
1647   if (!LastSchedule) {
1648     OS << "n/a\n";
1649     return;
1650   }
1651 
1652   p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
1653   p = isl_printer_print_schedule(p, LastSchedule);
1654   ScheduleStr = isl_printer_get_str(p);
1655   isl_printer_free(p);
1656 
1657   OS << ScheduleStr << "\n";
1658 }
1659 
1660 void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
1661   ScopPass::getAnalysisUsage(AU);
1662   AU.addRequired<DependenceInfo>();
1663   AU.addRequired<TargetTransformInfoWrapperPass>();
1664 
1665   AU.addPreserved<DependenceInfo>();
1666 }
1667 
1668 Pass *polly::createIslScheduleOptimizerPass() {
1669   return new IslScheduleOptimizer();
1670 }
1671 
// Register the pass with the legacy pass manager under the name
// "polly-opt-isl" and record its analysis dependencies.
INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
                      "Polly - Optimize schedule of SCoP", false, false);
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
                    "Polly - Optimize schedule of SCoP", false, false)
1679