1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass generates an entirely new schedule tree from the data dependences
11 // and iteration domains. The new schedule tree is computed in two steps:
12 //
13 // 1) The isl scheduling optimizer is run
14 //
15 // The isl scheduling optimizer creates a new schedule tree that maximizes
16 // parallelism and tileability and minimizes data-dependence distances. The
17 // algorithm used is a modified version of the ``Pluto'' algorithm:
18 //
19 //   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
20 //   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
21 //   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
22 //   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
23 //
24 // 2) A set of post-scheduling transformations is applied on the schedule tree.
25 //
26 // These optimizations include:
27 //
28 //  - Tiling of the innermost tilable bands
29 //  - Prevectorization - The choice of a possible outer loop that is strip-mined
30 //                       to the innermost level to enable inner-loop
31 //                       vectorization.
32 //  - Some optimizations for spatial locality are also planned.
33 //
34 // For a detailed description of the schedule tree itself please see section 6
35 // of:
36 //
37 // Polyhedral AST generation is more than scanning polyhedra
38 // Tobias Grosser, Sven Verdoolaege, Albert Cohen
39 // ACM Transactions on Programming Languages and Systems (TOPLAS),
40 // 37(4), July 2015
41 // http://www.grosser.es/#pub-polyhedral-AST-generation
42 //
43 // This publication also contains a detailed discussion of the different options
44 // for polyhedral loop unrolling, full/partial tile separation and other uses
45 // of the schedule tree.
46 //
47 //===----------------------------------------------------------------------===//
48 
49 #include "polly/ScheduleOptimizer.h"
50 #include "polly/CodeGen/CodeGeneration.h"
51 #include "polly/DependenceInfo.h"
52 #include "polly/LinkAllPasses.h"
53 #include "polly/Options.h"
54 #include "polly/ScopInfo.h"
55 #include "polly/ScopPass.h"
56 #include "polly/Simplify.h"
57 #include "polly/Support/GICHelper.h"
58 #include "polly/Support/ISLOStream.h"
59 #include "llvm/ADT/Statistic.h"
60 #include "llvm/Analysis/TargetTransformInfo.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/Pass.h"
63 #include "llvm/Support/CommandLine.h"
64 #include "llvm/Support/Debug.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "isl/constraint.h"
67 #include "isl/ctx.h"
68 #include "isl/map.h"
69 #include "isl/options.h"
70 #include "isl/printer.h"
71 #include "isl/schedule.h"
72 #include "isl/schedule_node.h"
73 #include "isl/space.h"
74 #include "isl/union_map.h"
75 #include "isl/union_set.h"
76 #include <algorithm>
77 #include <cassert>
78 #include <cmath>
79 #include <cstdint>
80 #include <cstdlib>
81 #include <string>
82 #include <vector>
83 
84 using namespace llvm;
85 using namespace polly;
86 
87 #define DEBUG_TYPE "polly-opt-isl"
88 
// Knobs forwarded to the isl scheduling (Pluto-style) optimizer.
static cl::opt<std::string>
    OptimizeDeps("polly-opt-optimize-only",
                 cl::desc("Only a certain kind of dependences (all/raw)"),
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

static cl::opt<std::string>
    SimplifyDeps("polly-opt-simplify-deps",
                 cl::desc("Dependences should be simplified (yes/no)"),
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

static cl::opt<int> MaxConstantTerm(
    "polly-opt-max-constant-term",
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> MaxCoefficient(
    "polly-opt-max-coefficient",
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string> FusionStrategy(
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    MaximizeBandDepth("polly-opt-maximize-bands",
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string> OuterCoincidence(
    "polly-opt-outer-coincidence",
    cl::desc("Try to construct schedules where the outer member of each band "
             "satisfies the coincidence constraints (yes/no)"),
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Knobs for the post-scheduling transformations (prevectorization, tiling).
static cl::opt<int> PrevectorWidth(
    "polly-prevect-width",
    cl::desc(
        "The number of loop iterations to strip-mine for pre-vectorization"),
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> FirstLevelTiling("polly-tiling",
                                      cl::desc("Enable loop tiling"),
                                      cl::init(true), cl::ZeroOrMore,
                                      cl::cat(PollyCategory));

// Target-description parameters used by the matrix-multiplication
// pattern-matching optimization to compute micro/macro-kernel sizes.
static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("A throughput of the processor floating-point arithmetic units "
             "expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
150 
// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size
// represent the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the values of the parameters of Intel Core i7-3820
// SandyBridge in case the parameters are not specified or not provided by the
// TargetTransformInfo.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelDefaultAssociativity(
    "polly-target-1st-cache-level-default-associativity",
    cl::desc("The default associativity of the first cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelDefaultAssociativity(
    "polly-target-2nd-cache-level-default-associativity",
    cl::desc("The default associativity of the second cache level"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelDefaultSize(
    "polly-target-1st-cache-level-default-size",
    cl::desc("The default size of the first cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
    cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelDefaultSize(
    "polly-target-2nd-cache-level-default-size",
    cl::desc("The default size of the second cache level specified in bytes"
             " (if not enough were provided by the TargetTransformInfo)."),
    cl::Hidden, cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> VectorRegisterBitwidth(
    "polly-target-vector-register-bitwidth",
    cl::desc("The size in bits of a vector register (if not set, this "
             "information is taken from LLVM's target information."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Tile-size configuration: each tiling level has a list option giving a size
// per loop dimension and a scalar default used when the list is too short.
static cl::opt<int> FirstLevelDefaultTileSize(
    "polly-default-tile-size",
    cl::desc("The default tile size (if not enough were provided by"
             " --polly-tile-sizes)"),
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    FirstLevelTileSizes("polly-tile-sizes",
                        cl::desc("A tile size for each loop dimension, filled "
                                 "with --polly-default-tile-size"),
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                        cl::cat(PollyCategory));

static cl::opt<bool>
    SecondLevelTiling("polly-2nd-level-tiling",
                      cl::desc("Enable a 2nd level loop of loop tiling"),
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondLevelDefaultTileSize(
    "polly-2nd-level-default-tile-size",
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
             " --polly-2nd-level-tile-sizes)"),
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    SecondLevelTileSizes("polly-2nd-level-tile-sizes",
                         cl::desc("A tile size for each loop dimension, filled "
                                  "with --polly-default-tile-size"),
                         cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                         cl::cat(PollyCategory));

static cl::opt<bool> RegisterTiling("polly-register-tiling",
                                    cl::desc("Enable register tiling"),
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::cat(PollyCategory));

static cl::opt<int> RegisterDefaultTileSize(
    "polly-register-tiling-default-tile-size",
    cl::desc("The default register tile size (if not enough were provided by"
             " --polly-register-tile-sizes)"),
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> PollyPatternMatchingNcQuotient(
    "polly-pattern-matching-nc-quotient",
    cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
             "macro-kernel, by Nr, the parameter of the micro-kernel"),
    cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    RegisterTileSizes("polly-register-tile-sizes",
                      cl::desc("A tile size for each loop dimension, filled "
                               "with --polly-register-tile-size"),
                      cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                      cl::cat(PollyCategory));

static cl::opt<bool>
    PMBasedOpts("polly-pattern-matching-based-opts",
                cl::desc("Perform optimizations based on pattern matching"),
                cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> OptimizedScops(
    "polly-optimized-scops",
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
             "the isl scheduling optimizer and the set of post-scheduling "
             "transformations is applied on the schedule tree"),
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
274 
// Counters reported via LLVM's -stats machinery.
STATISTIC(ScopsProcessed, "Number of scops processed");
STATISTIC(ScopsRescheduled, "Number of scops rescheduled");
STATISTIC(ScopsOptimized, "Number of scops optimized");

STATISTIC(NumAffineLoopsOptimized, "Number of affine loops optimized");
STATISTIC(NumBoxedLoopsOptimized, "Number of boxed loops optimized");

// Declares an array of three statistics tracking a schedule-tree property at
// three points in time: index 0 for the original schedule, index 1 after the
// isl scheduler ran, and index 2 after the post-scheduling transformations.
#define THREE_STATISTICS(VARNAME, DESC)                                        \
  static Statistic VARNAME[3] = {                                              \
      {DEBUG_TYPE, #VARNAME "0", DESC " (original)", {0}, {false}},            \
      {DEBUG_TYPE, #VARNAME "1", DESC " (after scheduler)", {0}, {false}},     \
      {DEBUG_TYPE, #VARNAME "2", DESC " (after optimizer)", {0}, {false}}}

THREE_STATISTICS(NumBands, "Number of bands");
THREE_STATISTICS(NumBandMembers, "Number of band members");
THREE_STATISTICS(NumCoincident, "Number of coincident band members");
THREE_STATISTICS(NumPermutable, "Number of permutable bands");
THREE_STATISTICS(NumFilters, "Number of filter nodes");
THREE_STATISTICS(NumExtension, "Number of extension nodes");

// Counters for the individual post-scheduling transformations.
STATISTIC(FirstLevelTileOpts, "Number of first level tiling applied");
STATISTIC(SecondLevelTileOpts, "Number of second level tiling applied");
STATISTIC(RegisterTileOpts, "Number of register tiling applied");
STATISTIC(PrevectOpts, "Number of strip-mining for prevectorization applied");
STATISTIC(MatMulOpts,
          "Number of matrix multiplication patterns detected and optimized");
301 
302 /// Create an isl::union_set, which describes the isolate option based on
303 /// IsolateDomain.
304 ///
305 /// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should
306 ///                      belong to the current band node.
307 /// @param OutDimsNum    A number of dimensions that should belong to
308 ///                      the current band node.
309 static isl::union_set getIsolateOptions(isl::set IsolateDomain,
310                                         unsigned OutDimsNum) {
311   unsigned Dims = IsolateDomain.dim(isl::dim::set);
312   assert(OutDimsNum <= Dims &&
313          "The isl::set IsolateDomain is used to describe the range of schedule "
314          "dimensions values, which should be isolated. Consequently, the "
315          "number of its dimensions should be greater than or equal to the "
316          "number of the schedule dimensions.");
317   isl::map IsolateRelation = isl::map::from_domain(IsolateDomain);
318   IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in,
319                                               Dims - OutDimsNum, OutDimsNum);
320   isl::set IsolateOption = IsolateRelation.wrap();
321   isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr);
322   IsolateOption = IsolateOption.set_tuple_id(Id);
323   return isl::union_set(IsolateOption);
324 }
325 
326 namespace {
327 /// Create an isl::union_set, which describes the specified option for the
328 /// dimension of the current node.
329 ///
330 /// @param Ctx    An isl::ctx, which is used to create the isl::union_set.
331 /// @param Option The name of the option.
332 isl::union_set getDimOptions(isl::ctx Ctx, const char *Option) {
333   isl::space Space(Ctx, 0, 1);
334   auto DimOption = isl::set::universe(Space);
335   auto Id = isl::id::alloc(Ctx, Option, nullptr);
336   DimOption = DimOption.set_tuple_id(Id);
337   return isl::union_set(DimOption);
338 }
339 } // namespace
340 
341 /// Create an isl::union_set, which describes the option of the form
342 /// [isolate[] -> unroll[x]].
343 ///
344 /// @param Ctx An isl::ctx, which is used to create the isl::union_set.
345 static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
346   isl::space Space = isl::space(Ctx, 0, 0, 1);
347   isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
348   isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
349   isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
350   UnrollIsolatedSetOption =
351       UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
352   UnrollIsolatedSetOption =
353       UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
354   return UnrollIsolatedSetOption.wrap();
355 }
356 
357 /// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
358 ///
359 /// @param Set         A set, which should be modified.
360 /// @param VectorWidth A parameter, which determines the constraint.
361 static isl::set addExtentConstraints(isl::set Set, int VectorWidth) {
362   unsigned Dims = Set.dim(isl::dim::set);
363   isl::space Space = Set.get_space();
364   isl::local_space LocalSpace = isl::local_space(Space);
365   isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
366   ExtConstr = ExtConstr.set_constant_si(0);
367   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1);
368   Set = Set.add_constraint(ExtConstr);
369   ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
370   ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1);
371   ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1);
372   return Set.add_constraint(ExtConstr);
373 }
374 
375 isl::set getPartialTilePrefixes(isl::set ScheduleRange, int VectorWidth) {
376   unsigned Dims = ScheduleRange.dim(isl::dim::set);
377   isl::set LoopPrefixes =
378       ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1);
379   auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth);
380   isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange);
381   BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1);
382   LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1);
383   return LoopPrefixes.subtract(BadPrefixes);
384 }
385 
/// Isolate the full tiles of a strip-mined band from the partial ones.
///
/// @param Node        A band node that has just been tiled (asserted).
/// @param VectorWidth The width used for the strip-mining.
/// @return            The band node with "isolate" and "atomic" AST-build
///                    options attached.
isl::schedule_node
ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node,
                                               int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Descend two levels to the point band introduced by the preceding tiling.
  Node = Node.child(0).child(0);
  isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation();
  isl::map ScheduleRelation = isl::map::from_union_map(SchedRelUMap);
  isl::set ScheduleRange = ScheduleRelation.range();
  // Prefixes whose innermost iterations form a complete vector extent.
  isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
  auto AtomicOption = getDimOptions(IsolateDomain.get_ctx(), "atomic");
  isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1);
  // Go back up and attach the options to the original band node.
  Node = Node.parent().parent();
  isl::union_set Options = IsolateOption.unite(AtomicOption);
  Node = Node.band_set_ast_build_options(Options);
  return Node;
}
402 
/// Strip-mine band member @p DimToVectorize of @p Node by @p VectorWidth to
/// create a trivially vectorizable innermost loop.
///
/// The chosen dimension is split into a band of its own, tiled by
/// @p VectorWidth, its full tiles are isolated, and the point band is sunk to
/// the innermost position and marked "SIMD" for the backend.
///
/// @param Node           The band node to transform (asserted to be a band).
/// @param DimToVectorize The index of the band member to strip-mine.
/// @param VectorWidth    The strip-mine (vector) width.
/// @return               A mark node labeled "SIMD" above the transformed
///                       band.
isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand(
    isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto ScheduleDimensions = Space.dim(isl::dim::set);
  assert(DimToVectorize < ScheduleDimensions);

  // Split off the dimensions before the one to vectorize ...
  if (DimToVectorize > 0) {
    Node = isl::manage(
        isl_schedule_node_band_split(Node.release(), DimToVectorize));
    Node = Node.child(0);
  }
  // ... and the dimensions after it, leaving a single-member band.
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1));
  Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Sizes = isl::multi_val::zero(Space);
  Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth));
  // Strip-mine the remaining dimension by VectorWidth.
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = Node.child(0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = Node.band_set_ast_build_options(
      isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }"));
  // Sink the point band to the innermost position of the subtree.
  Node = isl::manage(isl_schedule_node_band_sink(Node.release()));
  Node = Node.child(0);
  if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf)
    Node = Node.parent();
  auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr);
  PrevectOpts++;
  return Node.insert_mark(LoopMarker);
}
437 
438 isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node,
439                                                    const char *Identifier,
440                                                    ArrayRef<int> TileSizes,
441                                                    int DefaultTileSize) {
442   auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
443   auto Dims = Space.dim(isl::dim::set);
444   auto Sizes = isl::multi_val::zero(Space);
445   std::string IdentifierString(Identifier);
446   for (unsigned i = 0; i < Dims; i++) {
447     auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
448     Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize));
449   }
450   auto TileLoopMarkerStr = IdentifierString + " - Tiles";
451   auto TileLoopMarker =
452       isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr, nullptr);
453   Node = Node.insert_mark(TileLoopMarker);
454   Node = Node.child(0);
455   Node =
456       isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
457   Node = Node.child(0);
458   auto PointLoopMarkerStr = IdentifierString + " - Points";
459   auto PointLoopMarker =
460       isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr, nullptr);
461   Node = Node.insert_mark(PointLoopMarker);
462   return Node.child(0);
463 }
464 
465 isl::schedule_node ScheduleTreeOptimizer::applyRegisterTiling(
466     isl::schedule_node Node, ArrayRef<int> TileSizes, int DefaultTileSize) {
467   Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
468   auto Ctx = Node.get_ctx();
469   return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}"));
470 }
471 
472 static bool isSimpleInnermostBand(const isl::schedule_node &Node) {
473   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
474   assert(isl_schedule_node_n_children(Node.get()) == 1);
475 
476   auto ChildType = isl_schedule_node_get_type(Node.child(0).get());
477 
478   if (ChildType == isl_schedule_node_leaf)
479     return true;
480 
481   if (ChildType != isl_schedule_node_sequence)
482     return false;
483 
484   auto Sequence = Node.child(0);
485 
486   for (int c = 0, nc = isl_schedule_node_n_children(Sequence.get()); c < nc;
487        ++c) {
488     auto Child = Sequence.child(c);
489     if (isl_schedule_node_get_type(Child.get()) != isl_schedule_node_filter)
490       return false;
491     if (isl_schedule_node_get_type(Child.child(0).get()) !=
492         isl_schedule_node_leaf)
493       return false;
494   }
495   return true;
496 }
497 
498 bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) {
499   if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band)
500     return false;
501 
502   if (isl_schedule_node_n_children(Node.get()) != 1)
503     return false;
504 
505   if (!isl_schedule_node_band_get_permutable(Node.get()))
506     return false;
507 
508   auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
509   auto Dims = Space.dim(isl::dim::set);
510 
511   if (Dims <= 1)
512     return false;
513 
514   return isSimpleInnermostBand(Node);
515 }
516 
/// Apply the standard (non-pattern-specific) band optimizations.
///
/// Performs first-level, second-level, and register tiling as requested by
/// the corresponding command-line flags and, unless vectorization is
/// disabled, strip-mines the innermost coincident band member to enable
/// prevectorization.
///
/// @param Node The band node to optimize.
/// @param User Unused callback payload.
/// @return     The optimized subtree.
__isl_give isl::schedule_node
ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) {
  if (FirstLevelTiling) {
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
                    FirstLevelDefaultTileSize);
    FirstLevelTileOpts++;
  }

  if (SecondLevelTiling) {
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
                    SecondLevelDefaultTileSize);
    SecondLevelTileOpts++;
  }

  if (RegisterTiling) {
    Node =
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
    RegisterTileOpts++;
  }

  if (PollyVectorizerChoice == VECTORIZER_NONE)
    return Node;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  // Strip-mine the innermost (highest-index) coincident dimension, which is
  // safe to execute in parallel and hence to vectorize.
  for (int i = Dims - 1; i >= 0; i--)
    if (Node.band_member_get_coincident(i)) {
      Node = prevectSchedBand(Node, i, PrevectorWidth);
      break;
    }

  return Node;
}
551 
/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos,
                           unsigned SrcPos) {
  assert(DstPos < Map.dim(DimType) && SrcPos < Map.dim(DimType));
  if (DstPos == SrcPos)
    return Map;
  // move_dims drops tuple ids; remember them so they can be restored.
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  // Use the opposite tuple as scratch space for the dimension shuffle.
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  // Park both dimensions in the scratch tuple (MaxDim first so MinDim's index
  // stays valid), then reinsert them into DimType in swapped order.
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  if (DimId)
    Map = Map.set_tuple_id(DimType, DimId);
  if (FreeDimId)
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}
586 
587 /// Check the form of the access relation.
588 ///
589 /// Check that the access relation @p AccMap has the form M[i][j], where i
590 /// is a @p FirstPos and j is a @p SecondPos.
591 ///
592 /// @param AccMap    The access relation to be checked.
593 /// @param FirstPos  The index of the input dimension that is mapped to
594 ///                  the first output dimension.
595 /// @param SecondPos The index of the input dimension that is mapped to the
596 ///                  second output dimension.
597 /// @return          True in case @p AccMap has the expected form and false,
598 ///                  otherwise.
599 static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
600                                int &SecondPos) {
601   isl::space Space = AccMap.get_space();
602   isl::map Universe = isl::map::universe(Space);
603 
604   if (Space.dim(isl::dim::out) != 2)
605     return false;
606 
607   // MatMul has the form:
608   // for (i = 0; i < N; i++)
609   //   for (j = 0; j < M; j++)
610   //     for (k = 0; k < P; k++)
611   //       C[i, j] += A[i, k] * B[k, j]
612   //
613   // Permutation of three outer loops: 3! = 6 possibilities.
614   int FirstDims[] = {0, 0, 1, 1, 2, 2};
615   int SecondDims[] = {1, 2, 2, 0, 0, 1};
616   for (int i = 0; i < 6; i += 1) {
617     auto PossibleMatMul =
618         Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
619             .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);
620 
621     AccMap = AccMap.intersect_domain(Domain);
622     PossibleMatMul = PossibleMatMul.intersect_domain(Domain);
623 
624     // If AccMap spans entire domain (Non-partial write),
625     // compute FirstPos and SecondPos.
626     // If AccMap != PossibleMatMul here (the two maps have been gisted at
627     // this point), it means that the writes are not complete, or in other
628     // words, it is a Partial write and Partial writes must be rejected.
629     if (AccMap.is_equal(PossibleMatMul)) {
630       if (FirstPos != -1 && FirstPos != FirstDims[i])
631         continue;
632       FirstPos = FirstDims[i];
633       if (SecondPos != -1 && SecondPos != SecondDims[i])
634         continue;
635       SecondPos = SecondDims[i];
636       return true;
637     }
638   }
639 
640   return false;
641 }
642 
/// Does the memory access represent a non-scalar operand of the matrix
/// multiplication.
///
/// Check that the memory access @p MemAccess is the read access to a non-scalar
/// operand of the matrix multiplication or its result.
///
/// @param MemAccess The memory access to be checked.
/// @param MMI       Parameters of the matrix multiplication operands.
/// @return          True in case the memory access represents the read access
///                  to a non-scalar operand of the matrix multiplication and
///                  false, otherwise.
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
                                        MatMulInfoTy &MMI) {
  if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
    return false;
  auto AccMap = MemAccess->getLatestAccessRelation();
  isl::set StmtDomain = MemAccess->getStatement()->getDomain();
  // The checks below fill MMI as a side effect and their order matters: the
  // (i, j), (i, k), and (k, j) index patterns identify the reads from C, A,
  // and B respectively, and each operand may only be matched once.
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
    MMI.ReadFromC = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
    MMI.A = MemAccess;
    return true;
  }
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
    MMI.B = MemAccess;
    return true;
  }
  return false;
}
674 
675 /// Check accesses to operands of the matrix multiplication.
676 ///
677 /// Check that accesses of the SCoP statement, which corresponds to
678 /// the partial schedule @p PartialSchedule, are scalar in terms of loops
679 /// containing the matrix multiplication, in case they do not represent
680 /// accesses to the non-scalar operands of the matrix multiplication or
681 /// its result.
682 ///
683 /// @param  PartialSchedule The partial schedule of the SCoP statement.
684 /// @param  MMI             Parameters of the matrix multiplication operands.
685 /// @return                 True in case the corresponding SCoP statement
686 ///                         represents matrix multiplication and false,
687 ///                         otherwise.
688 static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
689                                     MatMulInfoTy &MMI) {
690   auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
691   auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
692   unsigned OutDimNum = PartialSchedule.dim(isl::dim::out);
693   assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
694                           "and, consequently, the corresponding scheduling "
695                           "functions have at least three dimensions.");
696   auto MapI =
697       permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
698   auto MapJ =
699       permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
700   auto MapK =
701       permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);
702 
703   auto Accesses = getAccessesInOrder(*Stmt);
704   for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) {
705     auto *MemAccessPtr = *MemA;
706     if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC &&
707         !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
708         !(MemAccessPtr->isStrideZero(MapI)) &&
709         MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK))
710       return false;
711   }
712   return true;
713 }
714 
715 /// Check for dependencies corresponding to the matrix multiplication.
716 ///
717 /// Check that there is only true dependence of the form
/// S(..., k, ...) -> S(..., k + 1, ...), where S is the SCoP statement
719 /// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
720 /// to the dependency produced by the matrix multiplication.
721 ///
722 /// @param  Schedule The schedule of the SCoP statement.
723 /// @param  D The SCoP dependencies.
724 /// @param  Pos The parameter to describe an acceptable true dependence.
725 ///             In case it has a negative value, try to determine its
726 ///             acceptable value.
727 /// @return True in case dependencies correspond to the matrix multiplication
728 ///         and false, otherwise.
729 static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
730                                   int &Pos) {
731   isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW);
732   isl::union_map Red = D->getDependences(Dependences::TYPE_RED);
733   if (Red)
734     Dep = Dep.unite(Red);
735   auto DomainSpace = Schedule.get_space().domain();
736   auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
737   auto Deltas = Dep.extract_map(Space).deltas();
738   int DeltasDimNum = Deltas.dim(isl::dim::set);
739   for (int i = 0; i < DeltasDimNum; i++) {
740     auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
741     Pos = Pos < 0 && Val.is_one() ? i : Pos;
742     if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one())))
743       return false;
744   }
745   if (DeltasDimNum == 0 || Pos < 0)
746     return false;
747   return true;
748 }
749 
750 /// Check if the SCoP statement could probably be optimized with analytical
751 /// modeling.
752 ///
753 /// containsMatrMult tries to determine whether the following conditions
754 /// are true:
755 /// 1. The last memory access modeling an array, MA1, represents writing to
756 ///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
757 ///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
758 ///    under consideration.
759 /// 2. There is only one loop-carried true dependency, and it has the
760 ///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
761 ///    loop-carried or anti dependencies.
762 /// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
763 ///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
764 ///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
765 ///    and all memory accesses of the SCoP that are different from MA1, MA2,
766 ///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
767 ///    of loops i1, i2 and i3.
768 ///
769 /// @param PartialSchedule The PartialSchedule that contains a SCoP statement
770 ///        to check.
/// @param D   The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands.
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
                             MatMulInfoTy &MMI) {
  // The input tuple id of the partial schedule carries the ScopStmt.
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  // A matrix multiplication statement needs more than one memory access.
  if (Stmt->size() <= 1)
    return false;

  // Walk the accesses backwards; the last array-kind access must be the
  // write of the result, of the form S(..., i, ..., j, ...) -> M(i, j).
  // Note: the first access (Accesses.begin()) is never examined here.
  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isLatestArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
      return false;
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // Determine the reduction dimension k from the dependences. This must run
  // before containsOnlyMatrMultAcc, which reads MMI.k.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // All remaining accesses must be the operand reads of A, B, and C, or be
  // scalar with respect to the loops i, j, and k.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  // All three non-scalar read operands must have been identified.
  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}
804 
805 /// Permute two dimensions of the band node.
806 ///
807 /// Permute FirstDim and SecondDim dimensions of the Node.
808 ///
809 /// @param Node The band node to be modified.
810 /// @param FirstDim The first dimension to be permuted.
811 /// @param SecondDim The second dimension to be permuted.
812 static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
813                                                     unsigned FirstDim,
814                                                     unsigned SecondDim) {
815   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band &&
816          isl_schedule_node_band_n_member(Node.get()) >
817              std::max(FirstDim, SecondDim));
818   auto PartialSchedule =
819       isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get()));
820   auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim);
821   auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim);
822   PartialSchedule =
823       PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
824   PartialSchedule =
825       PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
826   Node = isl::manage(isl_schedule_node_delete(Node.release()));
827   return Node.insert_partial_schedule(PartialSchedule);
828 }
829 
830 isl::schedule_node ScheduleTreeOptimizer::createMicroKernel(
831     isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) {
832   Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
833                              1);
834   Node = Node.parent().parent();
835   return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
836 }
837 
838 isl::schedule_node ScheduleTreeOptimizer::createMacroKernel(
839     isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) {
840   assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
841   if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
842       MacroKernelParams.Kc == 1)
843     return Node;
844   int DimOutNum = isl_schedule_node_band_n_member(Node.get());
845   std::vector<int> TileSizes(DimOutNum, 1);
846   TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
847   TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
848   TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
849   Node = tileNode(Node, "1st level tiling", TileSizes, 1);
850   Node = Node.parent().parent();
851   Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
852   Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
853   return Node.child(0).child(0);
854 }
855 
856 /// Get the size of the widest type of the matrix multiplication operands
857 /// in bytes, including alignment padding.
858 ///
859 /// @param MMI Parameters of the matrix multiplication operands.
860 /// @return The size of the widest type of the matrix multiplication operands
861 ///         in bytes, including alignment padding.
862 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
863   auto *S = MMI.A->getStatement()->getParent();
864   auto &DL = S->getFunction().getParent()->getDataLayout();
865   auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
866   auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
867   auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
868   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
869 }
870 
871 /// Get the size of the widest type of the matrix multiplication operands
872 /// in bits.
873 ///
874 /// @param MMI Parameters of the matrix multiplication operands.
875 /// @return The size of the widest type of the matrix multiplication operands
876 ///         in bits.
877 static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
878   auto *S = MMI.A->getStatement()->getParent();
879   auto &DL = S->getFunction().getParent()->getDataLayout();
880   auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
881   auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
882   auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
883   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
884 }
885 
886 /// Get parameters of the BLIS micro kernel.
887 ///
888 /// We choose the Mr and Nr parameters of the micro kernel to be large enough
889 /// such that no stalls caused by the combination of latencies and dependencies
890 /// are introduced during the updates of the resulting matrix of the matrix
891 /// multiplication. However, they should also be as small as possible to
892 /// release more registers for entries of multiplied matrices.
893 ///
894 /// @param TTI Target Transform Info.
895 /// @param MMI Parameters of the matrix multiplication operands.
896 /// @return The structure of type MicroKernelParamsTy.
897 /// @see MicroKernelParamsTy
898 static struct MicroKernelParamsTy
899 getMicroKernelParams(const TargetTransformInfo *TTI, MatMulInfoTy MMI) {
900   assert(TTI && "The target transform info should be provided.");
901 
902   // Nvec - Number of double-precision floating-point numbers that can be hold
903   // by a vector register. Use 2 by default.
904   long RegisterBitwidth = VectorRegisterBitwidth;
905 
906   if (RegisterBitwidth == -1)
907     RegisterBitwidth = TTI->getRegisterBitWidth(true);
908   auto ElementSize = getMatMulTypeSize(MMI);
909   assert(ElementSize > 0 && "The element size of the matrix multiplication "
910                             "operands should be greater than zero.");
911   auto Nvec = RegisterBitwidth / ElementSize;
912   if (Nvec == 0)
913     Nvec = 2;
914   int Nr =
915       ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
916   int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
917   return {Mr, Nr};
918 }
919 
920 namespace {
921 /// Determine parameters of the target cache.
922 ///
923 /// @param TTI Target Transform Info.
924 void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
925   auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
926   auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
927   if (FirstCacheLevelSize == -1) {
928     if (TTI->getCacheSize(L1DCache).hasValue())
929       FirstCacheLevelSize = TTI->getCacheSize(L1DCache).getValue();
930     else
931       FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize);
932   }
933   if (SecondCacheLevelSize == -1) {
934     if (TTI->getCacheSize(L2DCache).hasValue())
935       SecondCacheLevelSize = TTI->getCacheSize(L2DCache).getValue();
936     else
937       SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize);
938   }
939   if (FirstCacheLevelAssociativity == -1) {
940     if (TTI->getCacheAssociativity(L1DCache).hasValue())
941       FirstCacheLevelAssociativity =
942           TTI->getCacheAssociativity(L1DCache).getValue();
943     else
944       FirstCacheLevelAssociativity =
945           static_cast<int>(FirstCacheLevelDefaultAssociativity);
946   }
947   if (SecondCacheLevelAssociativity == -1) {
948     if (TTI->getCacheAssociativity(L2DCache).hasValue())
949       SecondCacheLevelAssociativity =
950           TTI->getCacheAssociativity(L2DCache).getValue();
951     else
952       SecondCacheLevelAssociativity =
953           static_cast<int>(SecondCacheLevelDefaultAssociativity);
954   }
955 }
956 } // namespace
957 
958 /// Get parameters of the BLIS macro kernel.
959 ///
960 /// During the computation of matrix multiplication, blocks of partitioned
961 /// matrices are mapped to different layers of the memory hierarchy.
962 /// To optimize data reuse, blocks should be ideally kept in cache between
963 /// iterations. Since parameters of the macro kernel determine sizes of these
964 /// blocks, there are upper and lower bounds on these parameters.
965 ///
966 /// @param TTI Target Transform Info.
967 /// @param MicroKernelParams Parameters of the micro-kernel
968 ///                          to be taken into account.
969 /// @param MMI Parameters of the matrix multiplication operands.
970 /// @return The structure of type MacroKernelParamsTy.
971 /// @see MacroKernelParamsTy
972 /// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
                     const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  getTargetCacheParameters(TTI);
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car - number of first-level cache ways used by the analytical model
  // referenced above for the A panel.
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is floor to int.
  // On Mac OS, division by 0 does not raise a signal. This causes negative
  // tile sizes to be computed. Prevent division by Cac==0 by early returning
  // if this happens.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  // Kc is limited by the first-level cache budget, Mc by the second-level
  // budget, and Nc is user-controlled via PollyPatternMatchingNcQuotient.
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be  greater than zero");
  return {Mc, Nc, Kc};
}
1016 
1017 /// Create an access relation that is specific to
1018 ///        the matrix multiplication pattern.
1019 ///
1020 /// Create an access relation of the following form:
1021 /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
1022 /// where I is @p FirstDim, J is @p SecondDim.
1023 ///
1024 /// It can be used, for example, to create relations that helps to consequently
1025 /// access elements of operands of a matrix multiplication after creation of
1026 /// the BLIS micro and macro kernels.
1027 ///
1028 /// @see ScheduleTreeOptimizer::createMicroKernel
1029 /// @see ScheduleTreeOptimizer::createMacroKernel
1030 ///
1031 /// Subsequently, the described access relation is applied to the range of
1032 /// @p MapOldIndVar, that is used to map original induction variables to
1033 /// the ones, which are produced by schedule transformations. It helps to
1034 /// define relations using a new space and, at the same time, keep them
1035 /// in the original one.
1036 ///
1037 /// @param MapOldIndVar The relation, which maps original induction variables
1038 ///                     to the ones, which are produced by schedule
1039 ///                     transformations.
1040 /// @param FirstDim, SecondDim The input dimensions that are used to define
1041 ///        the specified access relation.
1042 /// @return The specified access relation.
1043 isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
1044                          unsigned SecondDim) {
1045   auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3);
1046   auto AccessRel = isl::map::universe(AccessRelSpace);
1047   AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
1048   AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
1049   AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
1050   return MapOldIndVar.apply_range(AccessRel);
1051 }
1052 
1053 isl::schedule_node createExtensionNode(isl::schedule_node Node,
1054                                        isl::map ExtensionMap) {
1055   auto Extension = isl::union_map(ExtensionMap);
1056   auto NewNode = isl::schedule_node::from_extension(Extension);
1057   return Node.graft_before(NewNode);
1058 }
1059 
1060 /// Apply the packing transformation.
1061 ///
1062 /// The packing transformation can be described as a data-layout
1063 /// transformation that requires to introduce a new array, copy data
1064 /// to the array, and change memory access locations to reference the array.
1065 /// It can be used to ensure that elements of the new array are read in-stride
1066 /// access, aligned to cache lines boundaries, and preloaded into certain cache
1067 /// levels.
1068 ///
1069 /// As an example let us consider the packing of the array A that would help
1070 /// to read its elements with in-stride access. An access to the array A
1071 /// is represented by an access relation that has the form
1072 /// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
1073 /// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
1074 /// k mod Kc, j mod Nr, i mod Mr].
1075 ///
1076 /// To ensure that elements of the array A are read in-stride access, we add
1077 /// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
1078 /// Scop::createScopArrayInfo, change the access relation
1079 /// S[i, j, k] -> A[i, k] to
1080 /// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
1081 /// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
1082 /// the copy statement created by Scop::addScopStmt.
1083 ///
1084 /// @param Node The schedule node to be optimized.
1085 /// @param MapOldIndVar The relation, which maps original induction variables
1086 ///                     to the ones, which are produced by schedule
1087 ///                     transformations.
1088 /// @param MicroParams, MacroParams Parameters of the BLIS kernel
1089 ///                                 to be taken into account.
1090 /// @param MMI Parameters of the matrix multiplication operands.
1091 /// @return The optimized schedule node.
static isl::schedule_node
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
                                 MicroKernelParamsTy MicroParams,
                                 MacroKernelParamsTy MacroParams,
                                 MatMulInfoTy &MMI) {
  // The input tuple id of MapOldIndVar identifies the matmul ScopStmt.
  auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // Walk six levels up and split off the two outermost band members so the
  // copy statements can be grafted at the right schedule depth.
  Node = Node.parent().parent().parent().parent().parent().parent();
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0);
  // Packed_B has shape [Nc/Nr][Kc][Nr] and is addressed through schedule
  // dimensions 3, 5, and 7 of MapOldIndVar (see getMatMulAccRel).
  auto AccRel = getMatMulAccRel(MapOldIndVar, 3, 7);
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  // Redirect the read of B to the packed array; keep the old relation for
  // the copy statement that fills Packed_B.
  auto OldAcc = MMI.B->getLatestAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  // Schedule the copy under the two remaining outer dimensions; fixing
  // schedule dimension MMI.i to 0 keeps the copy from being replicated
  // along that dimension (NOTE(review): presumed intent — confirm).
  auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2,
                                         MapOldIndVar.dim(isl::dim::out) - 2);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
  auto Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto DomainId = Domain.get_tuple_id();
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getLatestAccessRelation(), Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = Node.child(0);
  // Packed_A has shape [Mc/Mr][Kc][Mr], addressed through schedule
  // dimensions 4, 5, and 6 of MapOldIndVar.
  AccRel = getMatMulAccRel(MapOldIndVar, 4, 6);
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  OldAcc = MMI.A->getLatestAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  // Schedule this copy under the three outer dimensions, fixing MMI.j to 0.
  ExtMap = MapOldIndVar.project_out(isl::dim::out, 3,
                                    MapOldIndVar.dim(isl::dim::out) - 3);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.A->getLatestAccessRelation(), Domain);

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  // Descend back to the position the caller received.
  return Node.child(0).child(0).child(0).child(0).child(0);
}
1157 
1158 /// Get a relation mapping induction variables produced by schedule
1159 /// transformations to the original ones.
1160 ///
1161 /// @param Node The schedule node produced as the result of creation
1162 ///        of the BLIS kernels.
1163 /// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
1164 ///                                             to be taken into account.
1165 /// @return  The relation mapping original induction variables to the ones
1166 ///          produced by schedule transformation.
1167 /// @see ScheduleTreeOptimizer::createMicroKernel
1168 /// @see ScheduleTreeOptimizer::createMacroKernel
1169 /// @see getMacroKernelParams
1170 isl::map
1171 getInductionVariablesSubstitution(isl::schedule_node Node,
1172                                   MicroKernelParamsTy MicroKernelParams,
1173                                   MacroKernelParamsTy MacroKernelParams) {
1174   auto Child = Node.child(0);
1175   auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
1176   auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
1177   if (MapOldIndVar.dim(isl::dim::out) > 9)
1178     return MapOldIndVar.project_out(isl::dim::out, 0,
1179                                     MapOldIndVar.dim(isl::dim::out) - 9);
1180   return MapOldIndVar;
1181 }
1182 
1183 /// Isolate a set of partial tile prefixes and unroll the isolated part.
1184 ///
1185 /// The set should ensure that it contains only partial tile prefixes that have
1186 /// exactly Mr x Nr iterations of the two innermost loops produced by
1187 /// the optimization of the matrix multiplication. Mr and Nr are parameters of
1188 /// the micro-kernel.
1189 ///
1190 /// In case of parametric bounds, this helps to auto-vectorize the unrolled
1191 /// innermost loops, using the SLP vectorizer.
1192 ///
1193 /// @param Node              The schedule node to be modified.
1194 /// @param MicroKernelParams Parameters of the micro-kernel
1195 ///                          to be taken into account.
1196 /// @return The modified isl_schedule_node.
static isl::schedule_node
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
                                 struct MicroKernelParamsTy MicroKernelParams) {
  // Range of the prefix schedule of the band's child: the values of the
  // schedule dimensions enclosing the innermost loops.
  isl::schedule_node Child = Node.get_child(0);
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
  unsigned Dims = Prefix.dim(isl::dim::set);
  // Drop the innermost dimension and keep only the prefixes of tiles that
  // contain exactly Nr and Mr iterations (see the function comment).
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);

  // On the current band: isolate those prefixes and unroll the isolated
  // part of the innermost loops.
  isl::union_set IsolateOption =
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
  isl::ctx Ctx = Node.get_ctx();
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
  Node = Node.band_set_ast_build_options(Options);
  // On the band three levels up: isolate the same prefixes and request
  // "separate" code generation for its dimensions.
  Node = Node.parent().parent().parent();
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
  Node = Node.band_set_ast_build_options(Options);
  // Return to the node the caller passed in.
  Node = Node.child(0).child(0).child(0);
  return Node;
}
1221 
1222 /// Mark @p BasePtr with "Inter iteration alias-free" mark node.
1223 ///
1224 /// @param Node The child of the mark node to be inserted.
1225 /// @param BasePtr The pointer to be marked.
1226 /// @return The modified isl_schedule_node.
1227 static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node,
1228                                                       Value *BasePtr) {
1229   if (!BasePtr)
1230     return Node;
1231 
1232   auto Id =
1233       isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr);
1234   return Node.insert_mark(Id).child(0);
1235 }
1236 
1237 /// Insert "Loop Vectorizer Disabled" mark node.
1238 ///
1239 /// @param Node The child of the mark node to be inserted.
1240 /// @return The modified isl_schedule_node.
1241 static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
1242   auto Id = isl::id::alloc(Node.get_ctx(), "Loop Vectorizer Disabled", nullptr);
1243   return Node.insert_mark(Id).child(0);
1244 }
1245 
1246 /// Restore the initial ordering of dimensions of the band node
1247 ///
1248 /// In case the band node represents all the dimensions of the iteration
1249 /// domain, recreate the band node to restore the initial ordering of the
1250 /// dimensions.
1251 ///
1252 /// @param Node The band node to be modified.
1253 /// @return The modified schedule node.
static isl::schedule_node
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Only rebuild bands that are directly followed by a leaf.
  if (isl_schedule_node_get_type(Node.child(0).get()) != isl_schedule_node_leaf)
    return Node;
  auto Domain = Node.get_universe_domain();
  assert(isl_union_set_n_set(Domain.get()) == 1);
  // Bail out unless the band is outermost (schedule depth zero) and covers
  // every dimension of the single iteration domain.
  if (Node.get_schedule_depth() != 0 ||
      (isl::set(Domain).dim(isl::dim::set) !=
       isl_schedule_node_band_n_member(Node.get())))
    return Node;
  // Delete the band and insert a new one whose partial schedule is the
  // identity over the iteration domain, i.e., the original dimension order.
  Node = isl::manage(isl_schedule_node_delete(Node.copy()));
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  // NOTE(review): the set tuple id is reset before insertion — presumably
  // required for insert_partial_schedule to accept the schedule; confirm.
  PartialScheduleMultiPwAff =
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
}
1273 
isl::schedule_node
ScheduleTreeOptimizer::optimizeMatMulPattern(isl::schedule_node Node,
                                             const TargetTransformInfo *TTI,
                                             MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  // Mark the base pointer of the result matrix as alias-free across
  // iterations.
  Node = markInterIterationAliasFree(
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(Node);
  // Move the loops i, j, k to the three innermost band positions, in that
  // order. After each swap, update where j and k now live in case one of
  // them occupied the target position.
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  // Build the BLIS-style macro and micro kernels.
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  // With degenerate macro-kernel parameters there is nothing to pack or
  // isolate.
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
                                                        MacroKernelParams);
  if (!MapOldIndVar)
    return Node;
  // Disable the loop vectorizer on the kernel; the isolated inner loops are
  // unrolled so the SLP vectorizer can handle them instead.
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}
1308 
1309 bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node,
1310                                               const Dependences *D,
1311                                               MatMulInfoTy &MMI) {
1312   auto PartialSchedule = isl::manage(
1313       isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
1314   Node = Node.child(0);
1315   auto LeafType = isl_schedule_node_get_type(Node.get());
1316   Node = Node.parent();
1317   if (LeafType != isl_schedule_node_leaf ||
1318       isl_schedule_node_band_n_member(Node.get()) < 3 ||
1319       Node.get_schedule_depth() != 0 ||
1320       isl_union_map_n_map(PartialSchedule.get()) != 1)
1321     return false;
1322   auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
1323   if (containsMatrMult(NewPartialSchedule, D, MMI))
1324     return true;
1325   return false;
1326 }
1327 
1328 __isl_give isl_schedule_node *
1329 ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
1330                                     void *User) {
1331   if (!isTileableBandNode(isl::manage_copy(Node)))
1332     return Node;
1333 
1334   const OptimizerAdditionalInfoTy *OAI =
1335       static_cast<const OptimizerAdditionalInfoTy *>(User);
1336 
1337   MatMulInfoTy MMI;
1338   if (PMBasedOpts && User &&
1339       isMatrMultPattern(isl::manage_copy(Node), OAI->D, MMI)) {
1340     LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
1341     MatMulOpts++;
1342     return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release();
1343   }
1344 
1345   return standardBandOpts(isl::manage(Node), User).release();
1346 }
1347 
1348 isl::schedule
1349 ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule,
1350                                         const OptimizerAdditionalInfoTy *OAI) {
1351   auto Root = Schedule.get_root();
1352   Root = optimizeScheduleNode(Root, OAI);
1353   return Root.get_schedule();
1354 }
1355 
1356 isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode(
1357     isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) {
1358   Node = isl::manage(isl_schedule_node_map_descendant_bottom_up(
1359       Node.release(), optimizeBand,
1360       const_cast<void *>(static_cast<const void *>(OAI))));
1361   return Node;
1362 }
1363 
1364 bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S,
1365                                                  isl::schedule NewSchedule) {
1366   // To understand if the schedule has been optimized we check if the schedule
1367   // has changed at all.
1368   // TODO: We can improve this by tracking if any necessarily beneficial
1369   // transformations have been performed. This can e.g. be tiling, loop
1370   // interchange, or ...) We can track this either at the place where the
1371   // transformation has been performed or, in case of automatic ILP based
1372   // optimizations, by comparing (yet to be defined) performance metrics
1373   // before/after the scheduling optimizer
1374   // (e.g., #stride-one accesses)
1375   if (S.containsExtensionNode(NewSchedule))
1376     return true;
1377   auto NewScheduleMap = NewSchedule.get_map();
1378   auto OldSchedule = S.getSchedule();
1379   assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1380                         "that make Scop::getSchedule() return nullptr.");
1381   bool changed = !OldSchedule.is_equal(NewScheduleMap);
1382   return changed;
1383 }
1384 
1385 namespace {
1386 
/// Legacy-pass-manager wrapper that runs the isl schedule optimizer on each
/// SCoP and installs the optimized schedule tree.
class IslScheduleOptimizer : public ScopPass {
public:
  // Pass identification; the address of ID uniquely identifies the pass.
  static char ID;

  explicit IslScheduleOptimizer() : ScopPass(ID) {}

  // Free the cached schedule; isl_schedule_free accepts nullptr.
  ~IslScheduleOptimizer() override { isl_schedule_free(LastSchedule); }

  /// Optimize the schedule of the SCoP @p S.
  bool runOnScop(Scop &S) override;

  /// Print the new schedule for the SCoP @p S.
  void printScop(raw_ostream &OS, Scop &S) const override;

  /// Register all analyses and transformation required.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Release the internal memory.
  void releaseMemory() override {
    isl_schedule_free(LastSchedule);
    LastSchedule = nullptr;
  }

private:
  // Schedule computed by the most recent runOnScop, printed by printScop.
  // NOTE(review): in this part of the file LastSchedule is only freed and
  // reset, never assigned — verify it is still updated somewhere, otherwise
  // printScop will always print "n/a".
  isl_schedule *LastSchedule = nullptr;
};
1413 } // namespace
1414 
1415 char IslScheduleOptimizer::ID = 0;
1416 
1417 /// Collect statistics for the schedule tree.
1418 ///
1419 /// @param Schedule The schedule tree to analyze. If not a schedule tree it is
1420 /// ignored.
1421 /// @param Version  The version of the schedule tree that is analyzed.
1422 ///                 0 for the original schedule tree before any transformation.
1423 ///                 1 for the schedule tree after isl's rescheduling.
1424 ///                 2 for the schedule tree after optimizations are applied
1425 ///                 (tiling, pattern matching)
1426 static void walkScheduleTreeForStatistics(isl::schedule Schedule, int Version) {
1427   auto Root = Schedule.get_root();
1428   if (!Root)
1429     return;
1430 
1431   isl_schedule_node_foreach_descendant_top_down(
1432       Root.get(),
1433       [](__isl_keep isl_schedule_node *nodeptr, void *user) -> isl_bool {
1434         isl::schedule_node Node = isl::manage_copy(nodeptr);
1435         int Version = *static_cast<int *>(user);
1436 
1437         switch (isl_schedule_node_get_type(Node.get())) {
1438         case isl_schedule_node_band: {
1439           NumBands[Version]++;
1440           if (isl_schedule_node_band_get_permutable(Node.get()) ==
1441               isl_bool_true)
1442             NumPermutable[Version]++;
1443 
1444           int CountMembers = isl_schedule_node_band_n_member(Node.get());
1445           NumBandMembers[Version] += CountMembers;
1446           for (int i = 0; i < CountMembers; i += 1) {
1447             if (Node.band_member_get_coincident(i))
1448               NumCoincident[Version]++;
1449           }
1450           break;
1451         }
1452 
1453         case isl_schedule_node_filter:
1454           NumFilters[Version]++;
1455           break;
1456 
1457         case isl_schedule_node_extension:
1458           NumExtension[Version]++;
1459           break;
1460 
1461         default:
1462           break;
1463         }
1464 
1465         return isl_bool_true;
1466       },
1467       &Version);
1468 }
1469 
/// Compute and install an optimized schedule for the SCoP @p S.
///
/// Translates the command-line options into isl scheduler settings, computes
/// a new schedule from the dependences, applies the post-scheduling
/// transformations, and installs the result if it differs from the original.
/// Always returns false (the pass reports no IR modification here; code
/// generation happens in a later pass).
bool IslScheduleOptimizer::runOnScop(Scop &S) {
  // Skip SCoPs in case they're already optimised by PPCGCodeGeneration
  if (S.isToBeSkipped())
    return false;

  // Skip empty SCoPs but still allow code generation as it will delete the
  // loops present but not needed.
  if (S.getSize() == 0) {
    S.markAsOptimized();
    return false;
  }

  // Statement-level dependences are the scheduler's input.
  const Dependences &D =
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);

  // Refuse cached dependences that live in a different isl_ctx than the SCoP;
  // mixing isl contexts is invalid.
  if (D.getSharedIslCtx() != S.getSharedIslCtx()) {
    LLVM_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
    return false;
  }

  if (!D.hasValidDependences())
    return false;

  // Drop any schedule cached from a previous run.
  isl_schedule_free(LastSchedule);
  LastSchedule = nullptr;

  // Build input data.
  // Validity always covers all dependence kinds; proximity (what the
  // scheduler tries to keep close) is selectable via the OptimizeDeps option.
  int ValidityKinds =
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  int ProximityKinds;

  if (OptimizeDeps == "all")
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  else if (OptimizeDeps == "raw")
    ProximityKinds = Dependences::TYPE_RAW;
  else {
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
           << " Falling back to optimizing all dependences.\n";
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  }

  isl::union_set Domain = S.getDomains();

  if (!Domain)
    return false;

  ScopsProcessed++;
  // Version 0: statistics for the tree before any transformation.
  walkScheduleTreeForStatistics(S.getScheduleTree(), 0);

  isl::union_map Validity = D.getDependences(ValidityKinds);
  isl::union_map Proximity = D.getDependences(ProximityKinds);

  // Simplify the dependences by removing the constraints introduced by the
  // domains. This can speed up the scheduling time significantly, as large
  // constant coefficients will be removed from the dependences. The
  // introduction of some additional dependences reduces the possible
  // transformations, but in most cases, such transformation do not seem to be
  // interesting anyway. In some cases this option may stop the scheduler to
  // find any schedule.
  if (SimplifyDeps == "yes") {
    Validity = Validity.gist_domain(Domain);
    Validity = Validity.gist_range(Domain);
    Proximity = Proximity.gist_domain(Domain);
    Proximity = Proximity.gist_range(Domain);
  } else if (SimplifyDeps != "no") {
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
              "or 'no'. Falling back to default: 'yes'\n";
  }

  LLVM_DEBUG(dbgs() << "\n\nCompute schedule from: ");
  LLVM_DEBUG(dbgs() << "Domain := " << Domain << ";\n");
  LLVM_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
  LLVM_DEBUG(dbgs() << "Validity := " << Validity << ";\n");

  // Translate the textual fusion strategy into isl's serialize-SCCs flag
  // (0 = maximal fusion, 1 = minimal fusion).
  unsigned IslSerializeSCCs;

  if (FusionStrategy == "max") {
    IslSerializeSCCs = 0;
  } else if (FusionStrategy == "min") {
    IslSerializeSCCs = 1;
  } else {
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
              "fusion.\n";
    IslSerializeSCCs = 0;
  }

  // Translate -polly-opt-maximize-bands (default: yes).
  int IslMaximizeBands;

  if (MaximizeBandDepth == "yes") {
    IslMaximizeBands = 1;
  } else if (MaximizeBandDepth == "no") {
    IslMaximizeBands = 0;
  } else {
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
              " or 'no'. Falling back to default: 'yes'\n";
    IslMaximizeBands = 1;
  }

  // Translate -polly-opt-outer-coincidence (default: no).
  int IslOuterCoincidence;

  if (OuterCoincidence == "yes") {
    IslOuterCoincidence = 1;
  } else if (OuterCoincidence == "no") {
    IslOuterCoincidence = 0;
  } else {
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
              "'yes' or 'no'. Falling back to default: 'no'\n";
    IslOuterCoincidence = 0;
  }

  isl_ctx *Ctx = S.getIslCtx().get();

  // Configure the isl scheduler on the SCoP's context before computing the
  // schedule; the order matters (options must be set first).
  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
  isl_options_set_tile_scale_tile_loops(Ctx, 0);

  // The scheduler may fail; temporarily continue on isl errors and restore
  // the previous error behavior afterwards. Failure is detected via the
  // null-schedule check below.
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);

  auto SC = isl::schedule_constraints::on_domain(Domain);
  SC = SC.set_proximity(Proximity);
  SC = SC.set_validity(Validity);
  SC = SC.set_coincidence(Validity);
  auto Schedule = SC.compute_schedule();
  isl_options_set_on_error(Ctx, OnErrorStatus);

  // Version 1: statistics for the tree after isl's rescheduling.
  walkScheduleTreeForStatistics(Schedule, 1);

  // In cases the scheduler is not able to optimize the code, we just do not
  // touch the schedule.
  if (!Schedule)
    return false;

  ScopsRescheduled++;

  LLVM_DEBUG({
    auto *P = isl_printer_to_str(Ctx);
    P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
    P = isl_printer_print_schedule(P, Schedule.get());
    auto *str = isl_printer_get_str(P);
    dbgs() << "NewScheduleTree: \n" << str << "\n";
    free(str);
    isl_printer_free(P);
  });

  // Apply the post-scheduling transformations (tiling, pattern matching, ...).
  Function &F = S.getFunction();
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
  auto NewSchedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
  // Version 2: statistics for the fully optimized tree.
  walkScheduleTreeForStatistics(NewSchedule, 2);

  // Keep the original schedule if nothing actually changed.
  if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule))
    return false;

  auto ScopStats = S.getStatistics();
  ScopsOptimized++;
  NumAffineLoopsOptimized += ScopStats.NumAffineLoops;
  NumBoxedLoopsOptimized += ScopStats.NumBoxedLoops;

  S.setScheduleTree(NewSchedule);
  S.markAsOptimized();

  if (OptimizedScops)
    errs() << S;

  return false;
}
1642 
1643 void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const {
1644   isl_printer *p;
1645   char *ScheduleStr;
1646 
1647   OS << "Calculated schedule:\n";
1648 
1649   if (!LastSchedule) {
1650     OS << "n/a\n";
1651     return;
1652   }
1653 
1654   p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
1655   p = isl_printer_print_schedule(p, LastSchedule);
1656   ScheduleStr = isl_printer_get_str(p);
1657   isl_printer_free(p);
1658 
1659   OS << ScheduleStr << "\n";
1660 }
1661 
1662 void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
1663   ScopPass::getAnalysisUsage(AU);
1664   AU.addRequired<DependenceInfo>();
1665   AU.addRequired<TargetTransformInfoWrapperPass>();
1666 
1667   AU.addPreserved<DependenceInfo>();
1668 }
1669 
/// Create a new IslScheduleOptimizer pass instance; the caller (the pass
/// manager) takes ownership.
Pass *polly::createIslScheduleOptimizerPass() {
  return new IslScheduleOptimizer();
}
1673 
// Register the pass under the command-line name "polly-opt-isl" and declare
// its analysis dependencies to the pass infrastructure.
INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
                      "Polly - Optimize schedule of SCoP", false, false);
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
                    "Polly - Optimize schedule of SCoP", false, false)
1681