1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass generates an entirely new schedule tree from the data dependences
10 // and iteration domains. The new schedule tree is computed in two steps:
11 //
12 // 1) The isl scheduling optimizer is run
13 //
14 // The isl scheduling optimizer creates a new schedule tree that maximizes
15 // parallelism and tileability and minimizes data-dependence distances. The
16 // algorithm used is a modified version of the ``Pluto'' algorithm:
17 //
18 //   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
19 //   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
20 //   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
21 //   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
22 //
23 // 2) A set of post-scheduling transformations is applied on the schedule tree.
24 //
25 // These optimizations include:
26 //
27 //  - Tiling of the innermost tilable bands
28 //  - Prevectorization - The choice of a possible outer loop that is strip-mined
29 //                       to the innermost level to enable inner-loop
30 //                       vectorization.
31 //  - Some optimizations for spatial locality are also planned.
32 //
33 // For a detailed description of the schedule tree itself please see section 6
34 // of:
35 //
36 // Polyhedral AST generation is more than scanning polyhedra
37 // Tobias Grosser, Sven Verdoolaege, Albert Cohen
38 // ACM Transactions on Programming Languages and Systems (TOPLAS),
39 // 37(4), July 2015
40 // http://www.grosser.es/#pub-polyhedral-AST-generation
41 //
42 // This publication also contains a detailed discussion of the different options
43 // for polyhedral loop unrolling, full/partial tile separation and other uses
44 // of the schedule tree.
45 //
46 //===----------------------------------------------------------------------===//
47 
48 #include "polly/ScheduleOptimizer.h"
49 #include "polly/CodeGen/CodeGeneration.h"
50 #include "polly/DependenceInfo.h"
51 #include "polly/LinkAllPasses.h"
52 #include "polly/Options.h"
53 #include "polly/ScopInfo.h"
54 #include "polly/ScopPass.h"
55 #include "polly/Simplify.h"
56 #include "polly/Support/GICHelper.h"
57 #include "polly/Support/ISLOStream.h"
58 #include "llvm/ADT/Statistic.h"
59 #include "llvm/Analysis/TargetTransformInfo.h"
60 #include "llvm/IR/Function.h"
61 #include "llvm/Pass.h"
62 #include "llvm/Support/CommandLine.h"
63 #include "llvm/Support/Debug.h"
64 #include "llvm/Support/raw_ostream.h"
65 #include "isl/constraint.h"
66 #include "isl/ctx.h"
67 #include "isl/map.h"
68 #include "isl/options.h"
69 #include "isl/printer.h"
70 #include "isl/schedule.h"
71 #include "isl/schedule_node.h"
72 #include "isl/space.h"
73 #include "isl/union_map.h"
74 #include "isl/union_set.h"
75 #include <algorithm>
76 #include <cassert>
77 #include <cmath>
78 #include <cstdint>
79 #include <cstdlib>
80 #include <string>
81 #include <vector>
82 
83 using namespace llvm;
84 using namespace polly;
85 
86 #define DEBUG_TYPE "polly-opt-isl"
87 
// Options that tune the input fed to the isl scheduler: which dependences
// are considered and how they are simplified/bounded.
static cl::opt<std::string>
    OptimizeDeps("polly-opt-optimize-only",
                 cl::desc("Only a certain kind of dependences (all/raw)"),
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

static cl::opt<std::string>
    SimplifyDeps("polly-opt-simplify-deps",
                 cl::desc("Dependences should be simplified (yes/no)"),
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

// Bounds on the coefficients the scheduler may produce; -1 disables the bound.
static cl::opt<int> MaxConstantTerm(
    "polly-opt-max-constant-term",
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> MaxCoefficient(
    "polly-opt-max-coefficient",
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

// Strategy knobs forwarded to isl's scheduler (fusion, band depth,
// outer-coincidence constraints).
static cl::opt<std::string> FusionStrategy(
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    MaximizeBandDepth("polly-opt-maximize-bands",
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string> OuterCoincidence(
    "polly-opt-outer-coincidence",
    cl::desc("Try to construct schedules where the outer member of each band "
             "satisfies the coincidence constraints (yes/no)"),
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Strip-mine factor used by the prevectorization transformation.
static cl::opt<int> PrevectorWidth(
    "polly-prevect-width",
    cl::desc(
        "The number of loop iterations to strip-mine for pre-vectorization"),
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> FirstLevelTiling("polly-tiling",
                                      cl::desc("Enable loop tiling"),
                                      cl::init(true), cl::ZeroOrMore,
                                      cl::cat(PollyCategory));

// FMA latency/throughput of the target, used to size the matmul micro-kernel.
static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("A throughput of the processor floating-point arithmetic units "
             "expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
149 
// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size
// represents the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the values of the parameters of Intel Core i7-3820
// SandyBridge in case the parameters are not specified or not provided by the
// TargetTransformInfo.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelDefaultAssociativity(
    "polly-target-1st-cache-level-default-associativity",
    cl::desc("The default associativity of the first cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelDefaultAssociativity(
    "polly-target-2nd-cache-level-default-associativity",
    cl::desc("The default associativity of the second cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Default: 32 KiB L1 (Intel SandyBridge, see comment above).
static cl::opt<int> FirstCacheLevelDefaultSize(
    "polly-target-1st-cache-level-default-size",
    cl::desc("The default size of the first cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Default: 256 KiB L2 (Intel SandyBridge, see comment above).
static cl::opt<int> SecondCacheLevelDefaultSize(
    "polly-target-2nd-cache-level-default-size",
    cl::desc("The default size of the second cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> VectorRegisterBitwidth(
    "polly-target-vector-register-bitwidth",
    cl::desc("The size in bits of a vector register (if not set, this "
             "information is taken from LLVM's target information."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// First-level tiling: per-dimension sizes, padded with the default size.
static cl::opt<int> FirstLevelDefaultTileSize(
    "polly-default-tile-size",
    cl::desc("The default tile size (if not enough were provided by"
             " --polly-tile-sizes)"),
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    FirstLevelTileSizes("polly-tile-sizes",
                        cl::desc("A tile size for each loop dimension, filled "
                                 "with --polly-default-tile-size"),
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                        cl::cat(PollyCategory));

// Second-level tiling: disabled by default, same size-list scheme.
static cl::opt<bool>
    SecondLevelTiling("polly-2nd-level-tiling",
                      cl::desc("Enable a 2nd level loop of loop tiling"),
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondLevelDefaultTileSize(
    "polly-2nd-level-default-tile-size",
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
             " --polly-2nd-level-tile-sizes)"),
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    SecondLevelTileSizes("polly-2nd-level-tile-sizes",
                         cl::desc("A tile size for each loop dimension, filled "
                                  "with --polly-default-tile-size"),
                         cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                         cl::cat(PollyCategory));

// Register tiling: disabled by default, same size-list scheme.
static cl::opt<bool> RegisterTiling("polly-register-tiling",
                                    cl::desc("Enable register tiling"),
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::cat(PollyCategory));

static cl::opt<int> RegisterDefaultTileSize(
    "polly-register-tiling-default-tile-size",
    cl::desc("The default register tile size (if not enough were provided by"
             " --polly-register-tile-sizes)"),
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));
248 
249 static cl::opt<int> PollyPatternMatchingNcQuotient(
250     "polly-pattern-matching-nc-quotient",
251     cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
252              "macro-kernel, by Nr, the parameter of the micro-kernel"),
253     cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));
254 
static cl::list<int>
    RegisterTileSizes("polly-register-tile-sizes",
                      cl::desc("A tile size for each loop dimension, filled "
                               "with --polly-register-tile-size"),
                      cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                      cl::cat(PollyCategory));

// Master switch for pattern-based optimizations (e.g. matrix multiplication).
static cl::opt<bool>
    PMBasedOpts("polly-pattern-matching-based-opts",
                cl::desc("Perform optimizations based on pattern matching"),
                cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Debug aid: dump the polyhedral description of each SCoP after optimization.
static cl::opt<bool> OptimizedScops(
    "polly-optimized-scops",
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
             "the isl scheduling optimizer and the set of post-scheduling "
             "transformations is applied on the schedule tree"),
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
273 
STATISTIC(ScopsProcessed, "Number of scops processed");
STATISTIC(ScopsRescheduled, "Number of scops rescheduled");
STATISTIC(ScopsOptimized, "Number of scops optimized");

STATISTIC(NumAffineLoopsOptimized, "Number of affine loops optimized");
STATISTIC(NumBoxedLoopsOptimized, "Number of boxed loops optimized");

// Declares an array of three statistics so a quantity can be recorded at
// three stages: on the original schedule tree (index 0), after the isl
// scheduler ran (index 1), and after the post-scheduling transformations
// (index 2).
#define THREE_STATISTICS(VARNAME, DESC)                                        \
  static Statistic VARNAME[3] = {                                              \
      {DEBUG_TYPE, #VARNAME "0", DESC " (original)", {0}, {false}},            \
      {DEBUG_TYPE, #VARNAME "1", DESC " (after scheduler)", {0}, {false}},     \
      {DEBUG_TYPE, #VARNAME "2", DESC " (after optimizer)", {0}, {false}}}

THREE_STATISTICS(NumBands, "Number of bands");
THREE_STATISTICS(NumBandMembers, "Number of band members");
THREE_STATISTICS(NumCoincident, "Number of coincident band members");
THREE_STATISTICS(NumPermutable, "Number of permutable bands");
THREE_STATISTICS(NumFilters, "Number of filter nodes");
THREE_STATISTICS(NumExtension, "Number of extension nodes");

// Counters for the individual post-scheduling transformations applied below.
STATISTIC(FirstLevelTileOpts, "Number of first level tiling applied");
STATISTIC(SecondLevelTileOpts, "Number of second level tiling applied");
STATISTIC(RegisterTileOpts, "Number of register tiling applied");
STATISTIC(PrevectOpts, "Number of strip-mining for prevectorization applied");
STATISTIC(MatMulOpts,
          "Number of matrix multiplication patterns detected and optimized");
300 
301 /// Create an isl::union_set, which describes the isolate option based on
302 /// IsolateDomain.
303 ///
304 /// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should
305 ///                      belong to the current band node.
306 /// @param OutDimsNum    A number of dimensions that should belong to
307 ///                      the current band node.
308 static isl::union_set getIsolateOptions(isl::set IsolateDomain,
309                                         unsigned OutDimsNum) {
310   unsigned Dims = IsolateDomain.dim(isl::dim::set);
311   assert(OutDimsNum <= Dims &&
312          "The isl::set IsolateDomain is used to describe the range of schedule "
313          "dimensions values, which should be isolated. Consequently, the "
314          "number of its dimensions should be greater than or equal to the "
315          "number of the schedule dimensions.");
316   isl::map IsolateRelation = isl::map::from_domain(IsolateDomain);
317   IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in,
318                                               Dims - OutDimsNum, OutDimsNum);
319   isl::set IsolateOption = IsolateRelation.wrap();
320   isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr);
321   IsolateOption = IsolateOption.set_tuple_id(Id);
322   return isl::union_set(IsolateOption);
323 }
324 
325 namespace {
326 /// Create an isl::union_set, which describes the specified option for the
327 /// dimension of the current node.
328 ///
329 /// @param Ctx    An isl::ctx, which is used to create the isl::union_set.
330 /// @param Option The name of the option.
331 isl::union_set getDimOptions(isl::ctx Ctx, const char *Option) {
332   isl::space Space(Ctx, 0, 1);
333   auto DimOption = isl::set::universe(Space);
334   auto Id = isl::id::alloc(Ctx, Option, nullptr);
335   DimOption = DimOption.set_tuple_id(Id);
336   return isl::union_set(DimOption);
337 }
338 } // namespace
339 
340 /// Create an isl::union_set, which describes the option of the form
341 /// [isolate[] -> unroll[x]].
342 ///
343 /// @param Ctx An isl::ctx, which is used to create the isl::union_set.
344 static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
345   isl::space Space = isl::space(Ctx, 0, 0, 1);
346   isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
347   isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
348   isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
349   UnrollIsolatedSetOption =
350       UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
351   UnrollIsolatedSetOption =
352       UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
353   return UnrollIsolatedSetOption.wrap();
354 }
355 
356 /// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
357 ///
358 /// @param Set         A set, which should be modified.
359 /// @param VectorWidth A parameter, which determines the constraint.
360 static isl::set addExtentConstraints(isl::set Set, int VectorWidth) {
361   unsigned Dims = Set.dim(isl::dim::set);
362   isl::space Space = Set.get_space();
363   isl::local_space LocalSpace = isl::local_space(Space);
364   isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
365   ExtConstr = ExtConstr.set_constant_si(0);
366   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1);
367   Set = Set.add_constraint(ExtConstr);
368   ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
369   ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1);
370   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1);
371   return Set.add_constraint(ExtConstr);
372 }
373 
374 isl::set getPartialTilePrefixes(isl::set ScheduleRange, int VectorWidth) {
375   unsigned Dims = ScheduleRange.dim(isl::dim::set);
376   isl::set LoopPrefixes =
377       ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1);
378   auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth);
379   isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange);
380   BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1);
381   LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1);
382   return LoopPrefixes.subtract(BadPrefixes);
383 }
384 
/// Isolate the prefixes that form full, VectorWidth-sized tiles from the
/// partial ones, and generate the remaining (partial) tiles atomically,
/// by attaching the corresponding AST build options to the band @p Node.
isl::schedule_node
ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node,
                                               int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Descend two levels below the band so the prefix schedule at that point
  // includes the band's own dimensions.
  Node = Node.child(0).child(0);
  isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation();
  isl::map ScheduleRelation = isl::map::from_union_map(SchedRelUMap);
  isl::set ScheduleRange = ScheduleRelation.range();
  // Prefixes of full tiles become the "isolate" option; everything else is
  // generated with the "atomic" option.
  isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
  auto AtomicOption = getDimOptions(IsolateDomain.get_ctx(), "atomic");
  isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1);
  // Climb back to the original band before setting the options.
  Node = Node.parent().parent();
  isl::union_set Options = IsolateOption.unite(AtomicOption);
  Node = Node.band_set_ast_build_options(Options);
  return Node;
}
401 
/// Strip-mine dimension @p DimToVectorize of the band @p Node by
/// @p VectorWidth to create a trivially vectorizable inner loop, and mark it
/// with a "SIMD" mark node for the backend.
isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand(
    isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto ScheduleDimensions = Space.dim(isl::dim::set);
  assert(DimToVectorize < ScheduleDimensions);

  // Split the band so that the dimension to vectorize ends up alone in a
  // single-member band: first split off the dimensions before it, ...
  if (DimToVectorize > 0) {
    Node = isl::manage(
        isl_schedule_node_band_split(Node.release(), DimToVectorize));
    Node = Node.child(0);
  }
  // ... then split off the dimensions after it.
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1));
  Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  // Strip-mine by VectorWidth.
  auto Sizes = isl::multi_val::zero(Space);
  Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth));
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = Node.child(0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = Node.band_set_ast_build_options(
      isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }"));
  // Sink the point band to the innermost position.
  Node = isl::manage(isl_schedule_node_band_sink(Node.release()));
  Node = Node.child(0);
  if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf)
    Node = Node.parent();
  // Tag the loop so the backend can recognize it as the SIMD loop.
  auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr);
  PrevectOpts++;
  return Node.insert_mark(LoopMarker);
}
436 
437 isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node,
438                                                    const char *Identifier,
439                                                    ArrayRef<int> TileSizes,
440                                                    int DefaultTileSize) {
441   auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
442   auto Dims = Space.dim(isl::dim::set);
443   auto Sizes = isl::multi_val::zero(Space);
444   std::string IdentifierString(Identifier);
445   for (unsigned i = 0; i < Dims; i++) {
446     auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
447     Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize));
448   }
449   auto TileLoopMarkerStr = IdentifierString + " - Tiles";
450   auto TileLoopMarker =
451       isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr, nullptr);
452   Node = Node.insert_mark(TileLoopMarker);
453   Node = Node.child(0);
454   Node =
455       isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
456   Node = Node.child(0);
457   auto PointLoopMarkerStr = IdentifierString + " - Points";
458   auto PointLoopMarker =
459       isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr, nullptr);
460   Node = Node.insert_mark(PointLoopMarker);
461   return Node.child(0);
462 }
463 
464 isl::schedule_node ScheduleTreeOptimizer::applyRegisterTiling(
465     isl::schedule_node Node, ArrayRef<int> TileSizes, int DefaultTileSize) {
466   Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
467   auto Ctx = Node.get_ctx();
468   return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}"));
469 }
470 
471 static bool isSimpleInnermostBand(const isl::schedule_node &Node) {
472   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
473   assert(isl_schedule_node_n_children(Node.get()) == 1);
474 
475   auto ChildType = isl_schedule_node_get_type(Node.child(0).get());
476 
477   if (ChildType == isl_schedule_node_leaf)
478     return true;
479 
480   if (ChildType != isl_schedule_node_sequence)
481     return false;
482 
483   auto Sequence = Node.child(0);
484 
485   for (int c = 0, nc = isl_schedule_node_n_children(Sequence.get()); c < nc;
486        ++c) {
487     auto Child = Sequence.child(c);
488     if (isl_schedule_node_get_type(Child.get()) != isl_schedule_node_filter)
489       return false;
490     if (isl_schedule_node_get_type(Child.child(0).get()) !=
491         isl_schedule_node_leaf)
492       return false;
493   }
494   return true;
495 }
496 
497 bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) {
498   if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band)
499     return false;
500 
501   if (isl_schedule_node_n_children(Node.get()) != 1)
502     return false;
503 
504   if (!isl_schedule_node_band_get_permutable(Node.get()))
505     return false;
506 
507   auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
508   auto Dims = Space.dim(isl::dim::set);
509 
510   if (Dims <= 1)
511     return false;
512 
513   return isSimpleInnermostBand(Node);
514 }
515 
/// Apply the standard (non-pattern-specific) band optimizations: optional
/// first-level, second-level and register tiling, followed by strip-mining of
/// one coincident dimension for prevectorization.
__isl_give isl::schedule_node
ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) {
  if (FirstLevelTiling) {
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
                    FirstLevelDefaultTileSize);
    FirstLevelTileOpts++;
  }

  if (SecondLevelTiling) {
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
                    SecondLevelDefaultTileSize);
    SecondLevelTileOpts++;
  }

  if (RegisterTiling) {
    Node =
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
    RegisterTileOpts++;
  }

  // Prevectorization only makes sense when a vectorizer is enabled.
  if (PollyVectorizerChoice == VECTORIZER_NONE)
    return Node;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  // Strip-mine the innermost coincident band member (searching from the
  // innermost dimension outwards); at most one dimension is transformed.
  for (int i = Dims - 1; i >= 0; i--)
    if (Node.band_member_get_coincident(i)) {
      Node = prevectSchedBand(Node, i, PrevectorWidth);
      break;
    }

  return Node;
}
550 
/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos,
                           unsigned SrcPos) {
  assert(DstPos < Map.dim(DimType) && SrcPos < Map.dim(DimType));
  if (DstPos == SrcPos)
    return Map;
  // Moving dimensions across tuples drops the tuple ids, so remember them
  // and restore them at the end.
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  // Use the opposite tuple as scratch space for the swap.
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  // Move both dimensions into the scratch tuple (higher index first so the
  // lower index stays valid), then move them back in swapped order.
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  if (DimId)
    Map = Map.set_tuple_id(DimType, DimId);
  if (FreeDimId)
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}
585 
586 /// Check the form of the access relation.
587 ///
588 /// Check that the access relation @p AccMap has the form M[i][j], where i
589 /// is a @p FirstPos and j is a @p SecondPos.
590 ///
591 /// @param AccMap    The access relation to be checked.
592 /// @param FirstPos  The index of the input dimension that is mapped to
593 ///                  the first output dimension.
594 /// @param SecondPos The index of the input dimension that is mapped to the
595 ///                  second output dimension.
596 /// @return          True in case @p AccMap has the expected form and false,
597 ///                  otherwise.
598 static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
599                                int &SecondPos) {
600   isl::space Space = AccMap.get_space();
601   isl::map Universe = isl::map::universe(Space);
602 
603   if (Space.dim(isl::dim::out) != 2)
604     return false;
605 
606   // MatMul has the form:
607   // for (i = 0; i < N; i++)
608   //   for (j = 0; j < M; j++)
609   //     for (k = 0; k < P; k++)
610   //       C[i, j] += A[i, k] * B[k, j]
611   //
612   // Permutation of three outer loops: 3! = 6 possibilities.
613   int FirstDims[] = {0, 0, 1, 1, 2, 2};
614   int SecondDims[] = {1, 2, 2, 0, 0, 1};
615   for (int i = 0; i < 6; i += 1) {
616     auto PossibleMatMul =
617         Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
618             .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);
619 
620     AccMap = AccMap.intersect_domain(Domain);
621     PossibleMatMul = PossibleMatMul.intersect_domain(Domain);
622 
623     // If AccMap spans entire domain (Non-partial write),
624     // compute FirstPos and SecondPos.
625     // If AccMap != PossibleMatMul here (the two maps have been gisted at
626     // this point), it means that the writes are not complete, or in other
627     // words, it is a Partial write and Partial writes must be rejected.
628     if (AccMap.is_equal(PossibleMatMul)) {
629       if (FirstPos != -1 && FirstPos != FirstDims[i])
630         continue;
631       FirstPos = FirstDims[i];
632       if (SecondPos != -1 && SecondPos != SecondDims[i])
633         continue;
634       SecondPos = SecondDims[i];
635       return true;
636     }
637   }
638 
639   return false;
640 }
641 
642 /// Does the memory access represent a non-scalar operand of the matrix
643 /// multiplication.
644 ///
645 /// Check that the memory access @p MemAccess is the read access to a non-scalar
646 /// operand of the matrix multiplication or its result.
647 ///
648 /// @param MemAccess The memory access to be checked.
649 /// @param MMI       Parameters of the matrix multiplication operands.
650 /// @return          True in case the memory access represents the read access
651 ///                  to a non-scalar operand of the matrix multiplication and
652 ///                  false, otherwise.
653 static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
654                                         MatMulInfoTy &MMI) {
655   if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
656     return false;
657   auto AccMap = MemAccess->getLatestAccessRelation();
658   isl::set StmtDomain = MemAccess->getStatement()->getDomain();
659   if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
660     MMI.ReadFromC = MemAccess;
661     return true;
662   }
663   if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
664     MMI.A = MemAccess;
665     return true;
666   }
667   if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
668     MMI.B = MemAccess;
669     return true;
670   }
671   return false;
672 }
673 
674 /// Check accesses to operands of the matrix multiplication.
675 ///
676 /// Check that accesses of the SCoP statement, which corresponds to
677 /// the partial schedule @p PartialSchedule, are scalar in terms of loops
678 /// containing the matrix multiplication, in case they do not represent
679 /// accesses to the non-scalar operands of the matrix multiplication or
680 /// its result.
681 ///
682 /// @param  PartialSchedule The partial schedule of the SCoP statement.
683 /// @param  MMI             Parameters of the matrix multiplication operands.
684 /// @return                 True in case the corresponding SCoP statement
685 ///                         represents matrix multiplication and false,
686 ///                         otherwise.
687 static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
688                                     MatMulInfoTy &MMI) {
689   auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
690   auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
691   unsigned OutDimNum = PartialSchedule.dim(isl::dim::out);
692   assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
693                           "and, consequently, the corresponding scheduling "
694                           "functions have at least three dimensions.");
695   auto MapI =
696       permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
697   auto MapJ =
698       permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
699   auto MapK =
700       permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);
701 
702   auto Accesses = getAccessesInOrder(*Stmt);
703   for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) {
704     auto *MemAccessPtr = *MemA;
705     if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC &&
706         !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
707         !(MemAccessPtr->isStrideZero(MapI)) &&
708         MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK))
709       return false;
710   }
711   return true;
712 }
713 
714 /// Check for dependencies corresponding to the matrix multiplication.
715 ///
716 /// Check that there is only true dependence of the form
717 /// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
718 /// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
719 /// to the dependency produced by the matrix multiplication.
720 ///
721 /// @param  Schedule The schedule of the SCoP statement.
722 /// @param  D The SCoP dependencies.
723 /// @param  Pos The parameter to describe an acceptable true dependence.
724 ///             In case it has a negative value, try to determine its
725 ///             acceptable value.
726 /// @return True in case dependencies correspond to the matrix multiplication
727 ///         and false, otherwise.
static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
                                  int &Pos) {
  // Collect flow (RAW) dependences and, when present, reduction dependences.
  isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW);
  isl::union_map Red = D->getDependences(Dependences::TYPE_RED);
  if (Red)
    Dep = Dep.unite(Red);
  // Restrict to self-dependences of the statement scheduled by @p Schedule
  // and compute the dependence distance vectors.
  auto DomainSpace = Schedule.get_space().domain();
  auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
  auto Deltas = Dep.extract_map(Space).deltas();
  int DeltasDimNum = Deltas.dim(isl::dim::set);
  for (int i = 0; i < DeltasDimNum; i++) {
    auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
    // If the caller did not fix Pos (negative), adopt the first dimension
    // whose constant distance is one.
    Pos = Pos < 0 && Val.is_one() ? i : Pos;
    // Every other dimension must have a constant distance of zero; a
    // non-constant distance (NaN) or any other value disqualifies the
    // statement.
    if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one())))
      return false;
  }
  // Require at least one dimension and a determined carried-dependence
  // position.
  if (DeltasDimNum == 0 || Pos < 0)
    return false;
  return true;
}
748 
749 /// Check if the SCoP statement could probably be optimized with analytical
750 /// modeling.
751 ///
752 /// containsMatrMult tries to determine whether the following conditions
753 /// are true:
754 /// 1. The last memory access modeling an array, MA1, represents writing to
755 ///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
756 ///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
757 ///    under consideration.
758 /// 2. There is only one loop-carried true dependency, and it has the
759 ///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
760 ///    loop-carried or anti dependencies.
761 /// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
762 ///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
763 ///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
764 ///    and all memory accesses of the SCoP that are different from MA1, MA2,
765 ///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
766 ///    of loops i1, i2 and i3.
767 ///
768 /// @param PartialSchedule The PartialSchedule that contains a SCoP statement
769 ///        to check.
/// @param D   The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands.
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
                             MatMulInfoTy &MMI) {
  // The SCoP statement is attached to the tuple id of the schedule's input
  // dimensions.
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  // A matrix multiplication needs more than one memory access.
  if (Stmt->size() <= 1)
    return false;

  // Walk the accesses backwards to locate the last array access; it must be
  // a write of the form S(..., i, ..., j, ...) -> M(i, j), i.e. the write to
  // the result matrix C. Note the loop deliberately never inspects
  // Accesses.begin() itself.
  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isLatestArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
      return false;
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // Determine the position of the reduction loop (stored into MMI.k) and
  // verify there are no other loop-carried dependences.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // Classify the remaining accesses as the reads of A, B and C, or as
  // accesses that are scalar with respect to the multiplication loops.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  // All three read operands must have been identified.
  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}
803 
804 /// Permute two dimensions of the band node.
805 ///
806 /// Permute FirstDim and SecondDim dimensions of the Node.
807 ///
808 /// @param Node The band node to be modified.
809 /// @param FirstDim The first dimension to be permuted.
810 /// @param SecondDim The second dimension to be permuted.
811 static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
812                                                     unsigned FirstDim,
813                                                     unsigned SecondDim) {
814   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band &&
815          isl_schedule_node_band_n_member(Node.get()) >
816              std::max(FirstDim, SecondDim));
817   auto PartialSchedule =
818       isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get()));
819   auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim);
820   auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim);
821   PartialSchedule =
822       PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
823   PartialSchedule =
824       PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
825   Node = isl::manage(isl_schedule_node_delete(Node.release()));
826   return Node.insert_partial_schedule(PartialSchedule);
827 }
828 
829 isl::schedule_node ScheduleTreeOptimizer::createMicroKernel(
830     isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) {
831   Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
832                              1);
833   Node = Node.parent().parent();
834   return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
835 }
836 
837 isl::schedule_node ScheduleTreeOptimizer::createMacroKernel(
838     isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) {
839   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
840   if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
841       MacroKernelParams.Kc == 1)
842     return Node;
843   int DimOutNum = isl_schedule_node_band_n_member(Node.get());
844   std::vector<int> TileSizes(DimOutNum, 1);
845   TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
846   TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
847   TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
848   Node = tileNode(Node, "1st level tiling", TileSizes, 1);
849   Node = Node.parent().parent();
850   Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
851   Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
852   return Node.child(0).child(0);
853 }
854 
855 /// Get the size of the widest type of the matrix multiplication operands
856 /// in bytes, including alignment padding.
857 ///
858 /// @param MMI Parameters of the matrix multiplication operands.
859 /// @return The size of the widest type of the matrix multiplication operands
860 ///         in bytes, including alignment padding.
861 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
862   auto *S = MMI.A->getStatement()->getParent();
863   auto &DL = S->getFunction().getParent()->getDataLayout();
864   auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
865   auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
866   auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
867   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
868 }
869 
870 /// Get the size of the widest type of the matrix multiplication operands
871 /// in bits.
872 ///
873 /// @param MMI Parameters of the matrix multiplication operands.
874 /// @return The size of the widest type of the matrix multiplication operands
875 ///         in bits.
876 static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
877   auto *S = MMI.A->getStatement()->getParent();
878   auto &DL = S->getFunction().getParent()->getDataLayout();
879   auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
880   auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
881   auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
882   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
883 }
884 
885 /// Get parameters of the BLIS micro kernel.
886 ///
887 /// We choose the Mr and Nr parameters of the micro kernel to be large enough
888 /// such that no stalls caused by the combination of latencies and dependencies
889 /// are introduced during the updates of the resulting matrix of the matrix
890 /// multiplication. However, they should also be as small as possible to
891 /// release more registers for entries of multiplied matrices.
892 ///
893 /// @param TTI Target Transform Info.
894 /// @param MMI Parameters of the matrix multiplication operands.
895 /// @return The structure of type MicroKernelParamsTy.
896 /// @see MicroKernelParamsTy
static struct MicroKernelParamsTy
getMicroKernelParams(const TargetTransformInfo *TTI, MatMulInfoTy MMI) {
  assert(TTI && "The target transform info should be provided.");

  // Determine the vector register width in bits: use the command-line
  // override when given (not -1), otherwise query the target.
  long RegisterBitwidth = VectorRegisterBitwidth;

  if (RegisterBitwidth == -1)
    RegisterBitwidth = TTI->getRegisterBitWidth(true);
  auto ElementSize = getMatMulTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  // Nvec - the number of matrix elements that can be held by a vector
  // register. Use 2 by default if the register is narrower than one element.
  auto Nvec = RegisterBitwidth / ElementSize;
  if (Nvec == 0)
    Nvec = 2;
  // Pick Nr as a multiple of Nvec so that Mr x Nr covers the
  // latency-throughput product of the vector FMA units, then derive Mr.
  int Nr =
      ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
  int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
  return {Mr, Nr};
}
918 
919 namespace {
920 /// Determine parameters of the target cache.
921 ///
922 /// @param TTI Target Transform Info.
923 void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
924   auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
925   auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
926   if (FirstCacheLevelSize == -1) {
927     if (TTI->getCacheSize(L1DCache).hasValue())
928       FirstCacheLevelSize = TTI->getCacheSize(L1DCache).getValue();
929     else
930       FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize);
931   }
932   if (SecondCacheLevelSize == -1) {
933     if (TTI->getCacheSize(L2DCache).hasValue())
934       SecondCacheLevelSize = TTI->getCacheSize(L2DCache).getValue();
935     else
936       SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize);
937   }
938   if (FirstCacheLevelAssociativity == -1) {
939     if (TTI->getCacheAssociativity(L1DCache).hasValue())
940       FirstCacheLevelAssociativity =
941           TTI->getCacheAssociativity(L1DCache).getValue();
942     else
943       FirstCacheLevelAssociativity =
944           static_cast<int>(FirstCacheLevelDefaultAssociativity);
945   }
946   if (SecondCacheLevelAssociativity == -1) {
947     if (TTI->getCacheAssociativity(L2DCache).hasValue())
948       SecondCacheLevelAssociativity =
949           TTI->getCacheAssociativity(L2DCache).getValue();
950     else
951       SecondCacheLevelAssociativity =
952           static_cast<int>(SecondCacheLevelDefaultAssociativity);
953   }
954 }
955 } // namespace
956 
957 /// Get parameters of the BLIS macro kernel.
958 ///
959 /// During the computation of matrix multiplication, blocks of partitioned
960 /// matrices are mapped to different layers of the memory hierarchy.
961 /// To optimize data reuse, blocks should be ideally kept in cache between
962 /// iterations. Since parameters of the macro kernel determine sizes of these
963 /// blocks, there are upper and lower bounds on these parameters.
964 ///
965 /// @param TTI Target Transform Info.
966 /// @param MicroKernelParams Parameters of the micro-kernel
967 ///                          to be taken into account.
968 /// @param MMI Parameters of the matrix multiplication operands.
969 /// @return The structure of type MacroKernelParamsTy.
970 /// @see MacroKernelParamsTy
971 /// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
                     const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  getTargetCacheParameters(TTI);
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car: derived from the L1 associativity and the Nr/Mr ratio (see the
  // analytical model in the paper referenced above).
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is floor to int.
  // On Mac OS, division by 0 does not raise a signal. This causes negative
  // tile sizes to be computed. Prevent division by Cac==0 by early returning
  // if this happens.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  // Kc is sized from the L1 cache; Mc from the L2 cache via the Cac ratio;
  // Nc is a fixed multiple of Nr chosen by a command-line quotient.
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be  greater than zero");
  return {Mc, Nc, Kc};
}
1015 
1016 /// Create an access relation that is specific to
1017 ///        the matrix multiplication pattern.
1018 ///
1019 /// Create an access relation of the following form:
1020 /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
1021 /// where I is @p FirstDim, J is @p SecondDim.
1022 ///
1023 /// It can be used, for example, to create relations that helps to consequently
1024 /// access elements of operands of a matrix multiplication after creation of
1025 /// the BLIS micro and macro kernels.
1026 ///
1027 /// @see ScheduleTreeOptimizer::createMicroKernel
1028 /// @see ScheduleTreeOptimizer::createMacroKernel
1029 ///
1030 /// Subsequently, the described access relation is applied to the range of
1031 /// @p MapOldIndVar, that is used to map original induction variables to
1032 /// the ones, which are produced by schedule transformations. It helps to
1033 /// define relations using a new space and, at the same time, keep them
1034 /// in the original one.
1035 ///
1036 /// @param MapOldIndVar The relation, which maps original induction variables
1037 ///                     to the ones, which are produced by schedule
1038 ///                     transformations.
1039 /// @param FirstDim, SecondDim The input dimensions that are used to define
1040 ///        the specified access relation.
1041 /// @return The specified access relation.
1042 isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
1043                          unsigned SecondDim) {
1044   auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3);
1045   auto AccessRel = isl::map::universe(AccessRelSpace);
1046   AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
1047   AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
1048   AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
1049   return MapOldIndVar.apply_range(AccessRel);
1050 }
1051 
1052 isl::schedule_node createExtensionNode(isl::schedule_node Node,
1053                                        isl::map ExtensionMap) {
1054   auto Extension = isl::union_map(ExtensionMap);
1055   auto NewNode = isl::schedule_node::from_extension(Extension);
1056   return Node.graft_before(NewNode);
1057 }
1058 
1059 /// Apply the packing transformation.
1060 ///
1061 /// The packing transformation can be described as a data-layout
1062 /// transformation that requires to introduce a new array, copy data
1063 /// to the array, and change memory access locations to reference the array.
1064 /// It can be used to ensure that elements of the new array are read in-stride
1065 /// access, aligned to cache lines boundaries, and preloaded into certain cache
1066 /// levels.
1067 ///
1068 /// As an example let us consider the packing of the array A that would help
1069 /// to read its elements with in-stride access. An access to the array A
1070 /// is represented by an access relation that has the form
1071 /// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
1072 /// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
1073 /// k mod Kc, j mod Nr, i mod Mr].
1074 ///
1075 /// To ensure that elements of the array A are read in-stride access, we add
1076 /// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
1077 /// Scop::createScopArrayInfo, change the access relation
1078 /// S[i, j, k] -> A[i, k] to
1079 /// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
1080 /// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
1081 /// the copy statement created by Scop::addScopStmt.
1082 ///
1083 /// @param Node The schedule node to be optimized.
1084 /// @param MapOldIndVar The relation, which maps original induction variables
1085 ///                     to the ones, which are produced by schedule
1086 ///                     transformations.
1087 /// @param MicroParams, MacroParams Parameters of the BLIS kernel
1088 ///                                 to be taken into account.
1089 /// @param MMI Parameters of the matrix multiplication operands.
1090 /// @return The optimized schedule node.
static isl::schedule_node
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
                                 MicroKernelParamsTy MicroParams,
                                 MacroKernelParamsTy MacroParams,
                                 MatMulInfoTy &MMI) {
  // The SCoP statement is attached to the tuple id of the map's input
  // dimensions.
  auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // Walk up to the band to split, keep its two outer dimensions separate.
  Node = Node.parent().parent().parent().parent().parent().parent();
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0);
  auto AccRel = getMatMulAccRel(MapOldIndVar, 3, 7);
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  // Introduce the packed array Packed_B and redirect the access to B to it.
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  auto OldAcc = MMI.B->getLatestAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  // Build the extension map that schedules the copy statement: drop all but
  // the first two schedule dimensions, invert the map, and pin loop i to 0
  // so the copy runs once per (outer) iteration.
  auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2,
                                         MapOldIndVar.dim(isl::dim::out) - 2);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
  auto Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto DomainId = Domain.get_tuple_id();
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getLatestAccessRelation(), Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = Node.child(0);
  AccRel = getMatMulAccRel(MapOldIndVar, 4, 6);
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  // Introduce the packed array Packed_A and redirect the access to A to it.
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  OldAcc = MMI.A->getLatestAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  // Analogous extension map for the copy of A, pinning loop j to 0.
  ExtMap = MapOldIndVar.project_out(isl::dim::out, 3,
                                    MapOldIndVar.dim(isl::dim::out) - 3);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.A->getLatestAccessRelation(), Domain);

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  // Descend back to the position inside the micro kernel.
  return Node.child(0).child(0).child(0).child(0).child(0);
}
1156 
1157 /// Get a relation mapping induction variables produced by schedule
1158 /// transformations to the original ones.
1159 ///
1160 /// @param Node The schedule node produced as the result of creation
1161 ///        of the BLIS kernels.
1162 /// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
1163 ///                                             to be taken into account.
1164 /// @return  The relation mapping original induction variables to the ones
1165 ///          produced by schedule transformation.
1166 /// @see ScheduleTreeOptimizer::createMicroKernel
1167 /// @see ScheduleTreeOptimizer::createMacroKernel
1168 /// @see getMacroKernelParams
1169 isl::map
1170 getInductionVariablesSubstitution(isl::schedule_node Node,
1171                                   MicroKernelParamsTy MicroKernelParams,
1172                                   MacroKernelParamsTy MacroKernelParams) {
1173   auto Child = Node.child(0);
1174   auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
1175   auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
1176   if (MapOldIndVar.dim(isl::dim::out) > 9)
1177     return MapOldIndVar.project_out(isl::dim::out, 0,
1178                                     MapOldIndVar.dim(isl::dim::out) - 9);
1179   return MapOldIndVar;
1180 }
1181 
1182 /// Isolate a set of partial tile prefixes and unroll the isolated part.
1183 ///
1184 /// The set should ensure that it contains only partial tile prefixes that have
1185 /// exactly Mr x Nr iterations of the two innermost loops produced by
1186 /// the optimization of the matrix multiplication. Mr and Nr are parameters of
1187 /// the micro-kernel.
1188 ///
1189 /// In case of parametric bounds, this helps to auto-vectorize the unrolled
1190 /// innermost loops, using the SLP vectorizer.
1191 ///
1192 /// @param Node              The schedule node to be modified.
1193 /// @param MicroKernelParams Parameters of the micro-kernel
1194 ///                          to be taken into account.
1195 /// @return The modified isl_schedule_node.
static isl::schedule_node
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
                                 struct MicroKernelParamsTy MicroKernelParams) {
  isl::schedule_node Child = Node.get_child(0);
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
  unsigned Dims = Prefix.dim(isl::dim::set);
  // Drop the innermost dimension, then restrict to the set of prefixes whose
  // partial tiles contain exactly Nr and Mr iterations, respectively.
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);

  // Isolate the full tiles on this band and request unrolling of the
  // isolated part.
  isl::union_set IsolateOption =
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
  isl::ctx Ctx = Node.get_ctx();
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
  Node = Node.band_set_ast_build_options(Options);
  // On the band three levels up, isolate the same prefixes and generate
  // separate code for them.
  Node = Node.parent().parent().parent();
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
  Node = Node.band_set_ast_build_options(Options);
  // Return to the original position in the tree.
  Node = Node.child(0).child(0).child(0);
  return Node;
}
1220 
1221 /// Mark @p BasePtr with "Inter iteration alias-free" mark node.
1222 ///
1223 /// @param Node The child of the mark node to be inserted.
1224 /// @param BasePtr The pointer to be marked.
1225 /// @return The modified isl_schedule_node.
1226 static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node,
1227                                                       Value *BasePtr) {
1228   if (!BasePtr)
1229     return Node;
1230 
1231   auto Id =
1232       isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr);
1233   return Node.insert_mark(Id).child(0);
1234 }
1235 
1236 /// Insert "Loop Vectorizer Disabled" mark node.
1237 ///
1238 /// @param Node The child of the mark node to be inserted.
1239 /// @return The modified isl_schedule_node.
1240 static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
1241   auto Id = isl::id::alloc(Node.get_ctx(), "Loop Vectorizer Disabled", nullptr);
1242   return Node.insert_mark(Id).child(0);
1243 }
1244 
1245 /// Restore the initial ordering of dimensions of the band node
1246 ///
1247 /// In case the band node represents all the dimensions of the iteration
1248 /// domain, recreate the band node to restore the initial ordering of the
1249 /// dimensions.
1250 ///
1251 /// @param Node The band node to be modified.
1252 /// @return The modified schedule node.
static isl::schedule_node
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Only handle an innermost band, i.e. one whose sole child is a leaf.
  if (isl_schedule_node_get_type(Node.child(0).get()) != isl_schedule_node_leaf)
    return Node;
  auto Domain = Node.get_universe_domain();
  assert(isl_union_set_n_set(Domain.get()) == 1);
  // The band must be outermost and represent every dimension of the
  // iteration domain; otherwise the original ordering cannot be restored.
  if (Node.get_schedule_depth() != 0 ||
      (isl::set(Domain).dim(isl::dim::set) !=
       isl_schedule_node_band_n_member(Node.get())))
    return Node;
  // Replace the band with one whose partial schedule is the identity on the
  // iteration domain, which restores the original dimension order.
  Node = isl::manage(isl_schedule_node_delete(Node.copy()));
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  // Drop the tuple id so the schedule can be inserted as an anonymous band.
  PartialScheduleMultiPwAff =
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
}
1272 
isl::schedule_node
ScheduleTreeOptimizer::optimizeMatMulPattern(isl::schedule_node Node,
                                             const TargetTransformInfo *TTI,
                                             MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  // Mark the write to the result matrix as free of inter-iteration aliasing.
  Node = markInterIterationAliasFree(
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(Node);
  // Move loops i, j and k into the three innermost band positions, updating
  // the recorded positions of j and k after each permutation.
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  // Build the BLIS-style macro and micro kernels.
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  // With a degenerate macro kernel no data-layout transformation is applied.
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
                                                        MacroKernelParams);
  if (!MapOldIndVar)
    return Node;
  // Disable the loop vectorizer on the kernel loops, unroll the isolated
  // innermost loops instead, and apply the packing transformation.
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}
1307 
1308 bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node,
1309                                               const Dependences *D,
1310                                               MatMulInfoTy &MMI) {
1311   auto PartialSchedule = isl::manage(
1312       isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
1313   Node = Node.child(0);
1314   auto LeafType = isl_schedule_node_get_type(Node.get());
1315   Node = Node.parent();
1316   if (LeafType != isl_schedule_node_leaf ||
1317       isl_schedule_node_band_n_member(Node.get()) < 3 ||
1318       Node.get_schedule_depth() != 0 ||
1319       isl_union_map_n_map(PartialSchedule.get()) != 1)
1320     return false;
1321   auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
1322   if (containsMatrMult(NewPartialSchedule, D, MMI))
1323     return true;
1324   return false;
1325 }
1326 
/// Optimize a single band node.
///
/// Callback invoked for every schedule-tree node by
/// isl_schedule_node_map_descendant_bottom_up (see optimizeScheduleNode).
/// Non-tileable bands are returned unchanged. Tileable bands that match the
/// matrix-multiplication pattern get the specialized kernel optimization;
/// all other tileable bands get the standard band optimizations.
///
/// Uses the raw isl C interface because the mapping function operates on
/// unmanaged isl_schedule_node pointers; ownership of @p Node is taken and
/// a new owned node is returned.
///
/// @param Node The schedule node to optimize (ownership transferred in/out).
/// @param User An OptimizerAdditionalInfoTy with the dependences and TTI.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
                                    void *User) {
  // manage_copy: borrow Node for the check without giving up ownership.
  if (!isTileableBandNode(isl::manage_copy(Node)))
    return Node;

  const OptimizerAdditionalInfoTy *OAI =
      static_cast<const OptimizerAdditionalInfoTy *>(User);

  MatMulInfoTy MMI;
  // Short-circuit on User guards the OAI->D dereference below.
  if (PMBasedOpts && User &&
      isMatrMultPattern(isl::manage_copy(Node), OAI->D, MMI)) {
    LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
    MatMulOpts++;
    // manage(Node): hand ownership to the C++ wrapper; release() hands it back
    // to the caller as required by the __isl_give contract.
    return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release();
  }

  return standardBandOpts(isl::manage(Node), User).release();
}
1346 
1347 isl::schedule
1348 ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule,
1349                                         const OptimizerAdditionalInfoTy *OAI) {
1350   auto Root = Schedule.get_root();
1351   Root = optimizeScheduleNode(Root, OAI);
1352   return Root.get_schedule();
1353 }
1354 
1355 isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode(
1356     isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) {
1357   Node = isl::manage(isl_schedule_node_map_descendant_bottom_up(
1358       Node.release(), optimizeBand,
1359       const_cast<void *>(static_cast<const void *>(OAI))));
1360   return Node;
1361 }
1362 
1363 bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S,
1364                                                  isl::schedule NewSchedule) {
1365   // To understand if the schedule has been optimized we check if the schedule
1366   // has changed at all.
1367   // TODO: We can improve this by tracking if any necessarily beneficial
1368   // transformations have been performed. This can e.g. be tiling, loop
1369   // interchange, or ...) We can track this either at the place where the
1370   // transformation has been performed or, in case of automatic ILP based
1371   // optimizations, by comparing (yet to be defined) performance metrics
1372   // before/after the scheduling optimizer
1373   // (e.g., #stride-one accesses)
1374   if (S.containsExtensionNode(NewSchedule))
1375     return true;
1376   auto NewScheduleMap = NewSchedule.get_map();
1377   auto OldSchedule = S.getSchedule();
1378   assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1379                         "that make Scop::getSchedule() return nullptr.");
1380   bool changed = !OldSchedule.is_equal(NewScheduleMap);
1381   return changed;
1382 }
1383 
namespace {

/// Legacy-pass-manager pass that computes an optimized schedule for a SCoP
/// using the isl scheduler plus the post-scheduling transformations
/// implemented by ScheduleTreeOptimizer (see runOnScop).
class IslScheduleOptimizer : public ScopPass {
public:
  static char ID;

  explicit IslScheduleOptimizer() : ScopPass(ID) {}

  ~IslScheduleOptimizer() override { isl_schedule_free(LastSchedule); }

  /// Optimize the schedule of the SCoP @p S.
  bool runOnScop(Scop &S) override;

  /// Print the new schedule for the SCoP @p S.
  void printScop(raw_ostream &OS, Scop &S) const override;

  /// Register all analyses and transformation required.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Release the internal memory.
  void releaseMemory() override {
    isl_schedule_free(LastSchedule);
    LastSchedule = nullptr;
  }

private:
  // Owned isl schedule, freed in releaseMemory() and the destructor.
  isl_schedule *LastSchedule = nullptr;
};
} // namespace
1413 
// Pass identification: LLVM uses the address of ID as the pass's unique key.
char IslScheduleOptimizer::ID = 0;
1415 
1416 /// Collect statistics for the schedule tree.
1417 ///
1418 /// @param Schedule The schedule tree to analyze. If not a schedule tree it is
1419 /// ignored.
1420 /// @param Version  The version of the schedule tree that is analyzed.
1421 ///                 0 for the original schedule tree before any transformation.
1422 ///                 1 for the schedule tree after isl's rescheduling.
1423 ///                 2 for the schedule tree after optimizations are applied
1424 ///                 (tiling, pattern matching)
1425 static void walkScheduleTreeForStatistics(isl::schedule Schedule, int Version) {
1426   auto Root = Schedule.get_root();
1427   if (!Root)
1428     return;
1429 
1430   isl_schedule_node_foreach_descendant_top_down(
1431       Root.get(),
1432       [](__isl_keep isl_schedule_node *nodeptr, void *user) -> isl_bool {
1433         isl::schedule_node Node = isl::manage_copy(nodeptr);
1434         int Version = *static_cast<int *>(user);
1435 
1436         switch (isl_schedule_node_get_type(Node.get())) {
1437         case isl_schedule_node_band: {
1438           NumBands[Version]++;
1439           if (isl_schedule_node_band_get_permutable(Node.get()) ==
1440               isl_bool_true)
1441             NumPermutable[Version]++;
1442 
1443           int CountMembers = isl_schedule_node_band_n_member(Node.get());
1444           NumBandMembers[Version] += CountMembers;
1445           for (int i = 0; i < CountMembers; i += 1) {
1446             if (Node.band_member_get_coincident(i))
1447               NumCoincident[Version]++;
1448           }
1449           break;
1450         }
1451 
1452         case isl_schedule_node_filter:
1453           NumFilters[Version]++;
1454           break;
1455 
1456         case isl_schedule_node_extension:
1457           NumExtension[Version]++;
1458           break;
1459 
1460         default:
1461           break;
1462         }
1463 
1464         return isl_bool_true;
1465       },
1466       &Version);
1467 }
1468 
/// Compute and install an optimized schedule for the SCoP @p S.
///
/// Pipeline: validate preconditions, translate the command-line options into
/// isl scheduler options, run isl's scheduler on the dependences, apply the
/// post-scheduling transformations (tiling, pattern matching), and store the
/// resulting schedule in the Scop if it is profitable. Always returns false:
/// the new schedule is recorded in Polly's Scop, no LLVM IR is modified here.
bool IslScheduleOptimizer::runOnScop(Scop &S) {
  // Skip SCoPs in case they're already optimised by PPCGCodeGeneration
  if (S.isToBeSkipped())
    return false;

  // Skip empty SCoPs but still allow code generation as it will delete the
  // loops present but not needed.
  if (S.getSize() == 0) {
    S.markAsOptimized();
    return false;
  }

  const Dependences &D =
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);

  // isl objects from different isl_ctx instances must never be mixed; bail
  // out if the dependence info was computed for another SCoP's context.
  if (D.getSharedIslCtx() != S.getSharedIslCtx()) {
    LLVM_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
    return false;
  }

  if (!D.hasValidDependences())
    return false;

  // Drop any schedule left over from a previous SCoP.
  // NOTE(review): LastSchedule is only ever freed/reset in this file and never
  // assigned a new schedule, so printScop always prints "n/a" — confirm
  // whether storing NewSchedule here was intended.
  isl_schedule_free(LastSchedule);
  LastSchedule = nullptr;

  // Build input data.
  int ValidityKinds =
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  int ProximityKinds;

  if (OptimizeDeps == "all")
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  else if (OptimizeDeps == "raw")
    ProximityKinds = Dependences::TYPE_RAW;
  else {
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
           << " Falling back to optimizing all dependences.\n";
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  }

  isl::union_set Domain = S.getDomains();

  if (!Domain)
    return false;

  ScopsProcessed++;
  walkScheduleTreeForStatistics(S.getScheduleTree(), 0);

  isl::union_map Validity = D.getDependences(ValidityKinds);
  isl::union_map Proximity = D.getDependences(ProximityKinds);

  // Simplify the dependences by removing the constraints introduced by the
  // domains. This can speed up the scheduling time significantly, as large
  // constant coefficients will be removed from the dependences. The
  // introduction of some additional dependences reduces the possible
  // transformations, but in most cases, such transformation do not seem to be
  // interesting anyway. In some cases this option may stop the scheduler to
  // find any schedule.
  if (SimplifyDeps == "yes") {
    Validity = Validity.gist_domain(Domain);
    Validity = Validity.gist_range(Domain);
    Proximity = Proximity.gist_domain(Domain);
    Proximity = Proximity.gist_range(Domain);
  } else if (SimplifyDeps != "no") {
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
              "or 'no'. Falling back to default: 'yes'\n";
  }

  LLVM_DEBUG(dbgs() << "\n\nCompute schedule from: ");
  LLVM_DEBUG(dbgs() << "Domain := " << Domain << ";\n");
  LLVM_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
  LLVM_DEBUG(dbgs() << "Validity := " << Validity << ";\n");

  // Translate the string-valued command-line options into the integer flags
  // the isl scheduler expects; unknown values fall back to the documented
  // defaults with a warning.
  unsigned IslSerializeSCCs;

  if (FusionStrategy == "max") {
    IslSerializeSCCs = 0;
  } else if (FusionStrategy == "min") {
    IslSerializeSCCs = 1;
  } else {
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
              "fusion.\n";
    IslSerializeSCCs = 0;
  }

  int IslMaximizeBands;

  if (MaximizeBandDepth == "yes") {
    IslMaximizeBands = 1;
  } else if (MaximizeBandDepth == "no") {
    IslMaximizeBands = 0;
  } else {
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
              " or 'no'. Falling back to default: 'yes'\n";
    IslMaximizeBands = 1;
  }

  int IslOuterCoincidence;

  if (OuterCoincidence == "yes") {
    IslOuterCoincidence = 1;
  } else if (OuterCoincidence == "no") {
    IslOuterCoincidence = 0;
  } else {
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
              "'yes' or 'no'. Falling back to default: 'no'\n";
    IslOuterCoincidence = 0;
  }

  // Configure the scheduler through the isl_ctx-global option state.
  isl_ctx *Ctx = S.getIslCtx().get();

  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
  isl_options_set_tile_scale_tile_loops(Ctx, 0);

  // Let a failed scheduling attempt return a null schedule instead of
  // aborting; the previous error behavior is restored right after.
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);

  // Run the isl scheduler: validity constraints must be respected, proximity
  // and coincidence constraints guide locality and parallelism.
  auto SC = isl::schedule_constraints::on_domain(Domain);
  SC = SC.set_proximity(Proximity);
  SC = SC.set_validity(Validity);
  SC = SC.set_coincidence(Validity);
  auto Schedule = SC.compute_schedule();
  isl_options_set_on_error(Ctx, OnErrorStatus);

  walkScheduleTreeForStatistics(Schedule, 1);

  // In cases the scheduler is not able to optimize the code, we just do not
  // touch the schedule.
  if (!Schedule)
    return false;

  ScopsRescheduled++;

  LLVM_DEBUG({
    auto *P = isl_printer_to_str(Ctx);
    P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
    P = isl_printer_print_schedule(P, Schedule.get());
    auto *str = isl_printer_get_str(P);
    dbgs() << "NewScheduleTree: \n" << str << "\n";
    free(str);
    isl_printer_free(P);
  });

  // Apply the post-scheduling transformations (tiling, prevectorization,
  // pattern-based optimization) on top of the isl schedule.
  Function &F = S.getFunction();
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
  auto NewSchedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
  walkScheduleTreeForStatistics(NewSchedule, 2);

  if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule))
    return false;

  auto ScopStats = S.getStatistics();
  ScopsOptimized++;
  NumAffineLoopsOptimized += ScopStats.NumAffineLoops;
  NumBoxedLoopsOptimized += ScopStats.NumBoxedLoops;

  S.setScheduleTree(NewSchedule);
  S.markAsOptimized();

  if (OptimizedScops)
    errs() << S;

  return false;
}
1641 
1642 void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const {
1643   isl_printer *p;
1644   char *ScheduleStr;
1645 
1646   OS << "Calculated schedule:\n";
1647 
1648   if (!LastSchedule) {
1649     OS << "n/a\n";
1650     return;
1651   }
1652 
1653   p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
1654   p = isl_printer_print_schedule(p, LastSchedule);
1655   ScheduleStr = isl_printer_get_str(p);
1656   isl_printer_free(p);
1657 
1658   OS << ScheduleStr << "\n";
1659 }
1660 
1661 void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
1662   ScopPass::getAnalysisUsage(AU);
1663   AU.addRequired<DependenceInfo>();
1664   AU.addRequired<TargetTransformInfoWrapperPass>();
1665 
1666   AU.addPreserved<DependenceInfo>();
1667 }
1668 
/// Factory for the legacy-pass-manager registration below.
Pass *polly::createIslScheduleOptimizerPass() {
  return new IslScheduleOptimizer();
}
1672 
// Register the pass with the legacy pass manager under -polly-opt-isl,
// declaring its analysis dependencies for correct scheduling.
INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
                      "Polly - Optimize schedule of SCoP", false, false);
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
                    "Polly - Optimize schedule of SCoP", false, false)
1680