1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass generates an entirely new schedule tree from the data dependences
11 // and iteration domains. The new schedule tree is computed in two steps:
12 //
13 // 1) The isl scheduling optimizer is run
14 //
15 // The isl scheduling optimizer creates a new schedule tree that maximizes
16 // parallelism and tileability and minimizes data-dependence distances. The
17 // algorithm used is a modified version of the ``Pluto'' algorithm:
18 //
19 //   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
20 //   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
21 //   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
22 //   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
23 //
24 // 2) A set of post-scheduling transformations is applied on the schedule tree.
25 //
26 // These optimizations include:
27 //
28 //  - Tiling of the innermost tilable bands
29 //  - Prevectorization - The choice of a possible outer loop that is strip-mined
30 //                       to the innermost level to enable inner-loop
31 //                       vectorization.
32 //  - Some optimizations for spatial locality are also planned.
33 //
34 // For a detailed description of the schedule tree itself please see section 6
35 // of:
36 //
37 // Polyhedral AST generation is more than scanning polyhedra
38 // Tobias Grosser, Sven Verdoolaege, Albert Cohen
39 // ACM Transactions on Programming Languages and Systems (TOPLAS),
40 // 37(4), July 2015
41 // http://www.grosser.es/#pub-polyhedral-AST-generation
42 //
43 // This publication also contains a detailed discussion of the different options
44 // for polyhedral loop unrolling, full/partial tile separation and other uses
45 // of the schedule tree.
46 //
47 //===----------------------------------------------------------------------===//
48 
49 #include "polly/ScheduleOptimizer.h"
50 #include "polly/CodeGen/CodeGeneration.h"
51 #include "polly/DependenceInfo.h"
52 #include "polly/LinkAllPasses.h"
53 #include "polly/Options.h"
54 #include "polly/ScopInfo.h"
55 #include "polly/Support/GICHelper.h"
56 #include "polly/Support/ISLOStream.h"
57 #include "llvm/Analysis/TargetTransformInfo.h"
58 #include "llvm/Support/Debug.h"
59 #include "isl/aff.h"
60 #include "isl/band.h"
61 #include "isl/constraint.h"
62 #include "isl/map.h"
63 #include "isl/options.h"
64 #include "isl/printer.h"
65 #include "isl/schedule.h"
66 #include "isl/schedule_node.h"
67 #include "isl/space.h"
68 #include "isl/union_map.h"
69 #include "isl/union_set.h"
70 
71 using namespace llvm;
72 using namespace polly;
73 
74 #define DEBUG_TYPE "polly-opt-isl"
75 
// Selects which kind of dependences the optimizer operates on (all/raw).
static cl::opt<std::string>
    OptimizeDeps("polly-opt-optimize-only",
                 cl::desc("Only a certain kind of dependences (all/raw)"),
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

// Whether the dependences should be simplified before scheduling (yes/no).
static cl::opt<std::string>
    SimplifyDeps("polly-opt-simplify-deps",
                 cl::desc("Dependences should be simplified (yes/no)"),
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

// Maximal constant term allowed in the schedule (-1 means unlimited).
static cl::opt<int> MaxConstantTerm(
    "polly-opt-max-constant-term",
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

// Maximal coefficient allowed in the schedule (-1 means unlimited).
static cl::opt<int> MaxCoefficient(
    "polly-opt-max-coefficient",
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));
97 
// Loop fusion strategy handed to the isl scheduler (min/max).
static cl::opt<std::string> FusionStrategy(
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Whether the isl scheduler should maximize the band depth (yes/no).
static cl::opt<std::string>
    MaximizeBandDepth("polly-opt-maximize-bands",
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Whether the outer member of each band must satisfy the coincidence
// constraints (yes/no).
static cl::opt<std::string> OuterCoincidence(
    "polly-opt-outer-coincidence",
    cl::desc("Try to construct schedules where the outer member of each band "
             "satisfies the coincidence constraints (yes/no)"),
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));

// Number of iterations strip-mined off for prevectorization.
static cl::opt<int> PrevectorWidth(
    "polly-prevect-width",
    cl::desc(
        "The number of loop iterations to strip-mine for pre-vectorization"),
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));

// Enables first-level loop tiling.
static cl::opt<bool> FirstLevelTiling("polly-tiling",
                                      cl::desc("Enable loop tiling"),
                                      cl::init(true), cl::ZeroOrMore,
                                      cl::cat(PollyCategory));

// Minimal number of cycles between dependent consecutive vector FMAs.
static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

// Vector FMA instructions the floating-point units can issue per cycle.
static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("A throughput of the processor floating-point arithmetic units "
             "expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
137 
// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size
// represent the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the values of the parameters of Intel Core i7-3820
// SandyBridge in case the parameters are not specified. Such an approach helps
// also to attain the high-performance on IBM POWER System S822 and IBM Power
// 730 Express server.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
    cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));

// Width of a vector register in bits; -1 defers to LLVM's target information.
static cl::opt<int> VectorRegisterBitwidth(
    "polly-target-vector-register-bitwidth",
    cl::desc("The size in bits of a vector register (if not set, this "
             "information is taken from LLVM's target information."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

// Fallback tile size for dimensions not covered by --polly-tile-sizes.
static cl::opt<int> FirstLevelDefaultTileSize(
    "polly-default-tile-size",
    cl::desc("The default tile size (if not enough were provided by"
             " --polly-tile-sizes)"),
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));

// Per-dimension first-level tile sizes.
static cl::list<int>
    FirstLevelTileSizes("polly-tile-sizes",
                        cl::desc("A tile size for each loop dimension, filled "
                                 "with --polly-default-tile-size"),
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                        cl::cat(PollyCategory));

// Enables an optional second level of loop tiling.
static cl::opt<bool>
    SecondLevelTiling("polly-2nd-level-tiling",
                      cl::desc("Enable a 2nd level loop of loop tiling"),
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

// Fallback tile size for the second tiling level.
static cl::opt<int> SecondLevelDefaultTileSize(
    "polly-2nd-level-default-tile-size",
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
             " --polly-2nd-level-tile-sizes)"),
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));
195 
196 static cl::list<int>
197     SecondLevelTileSizes("polly-2nd-level-tile-sizes",
198                          cl::desc("A tile size for each loop dimension, filled "
199                                   "with --polly-default-tile-size"),
200                          cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
201                          cl::cat(PollyCategory));
202 
// Enables register tiling.
static cl::opt<bool> RegisterTiling("polly-register-tiling",
                                    cl::desc("Enable register tiling"),
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::cat(PollyCategory));

// Fallback register tile size for dimensions not covered by
// --polly-register-tile-sizes.
static cl::opt<int> RegisterDefaultTileSize(
    "polly-register-tiling-default-tile-size",
    cl::desc("The default register tile size (if not enough were provided by"
             " --polly-register-tile-sizes)"),
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));
213 
214 static cl::opt<int> PollyPatternMatchingNcQuotient(
215     "polly-pattern-matching-nc-quotient",
216     cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
217              "macro-kernel, by Nr, the parameter of the micro-kernel"),
218     cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));
219 
220 static cl::list<int>
221     RegisterTileSizes("polly-register-tile-sizes",
222                       cl::desc("A tile size for each loop dimension, filled "
223                                "with --polly-register-tile-size"),
224                       cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
225                       cl::cat(PollyCategory));
226 
// Enables pattern-matching-based optimizations (e.g. the matrix
// multiplication detection implemented below).
static cl::opt<bool>
    PMBasedOpts("polly-pattern-matching-based-opts",
                cl::desc("Perform optimizations based on pattern matching"),
                cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Dumps the polyhedral description of SCoPs after the scheduling optimizer
// and the post-scheduling transformations ran.
static cl::opt<bool> OptimizedScops(
    "polly-optimized-scops",
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
             "the isl scheduling optimizer and the set of post-scheduling "
             "transformations is applied on the schedule tree"),
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
238 
/// Create an isl_union_set, which describes the isolate option based on
/// IsolateDomain.
///
/// @param IsolateDomain An isl_set whose @p OutDimsNum last dimensions should
///                      belong to the current band node.
/// @param OutDimsNum    A number of dimensions that should belong to
///                      the current band node.
static __isl_give isl_union_set *
getIsolateOptions(__isl_take isl_set *IsolateDomain, unsigned OutDimsNum) {
  auto Dims = isl_set_dim(IsolateDomain, isl_dim_set);
  assert(OutDimsNum <= Dims &&
         "The isl_set IsolateDomain is used to describe the range of schedule "
         "dimensions values, which should be isolated. Consequently, the "
         "number of its dimensions should be greater than or equal to the "
         "number of the schedule dimensions.");
  // Turn the set into a relation that maps the prefix dimensions to the last
  // OutDimsNum dimensions, i.e. [prefix] -> [band dims].
  auto *IsolateRelation = isl_map_from_domain(IsolateDomain);
  IsolateRelation =
      isl_map_move_dims(IsolateRelation, isl_dim_out, 0, isl_dim_in,
                        Dims - OutDimsNum, OutDimsNum);
  // Wrap the relation and tag it with the "isolate" id so isl's AST build
  // recognizes it as an isolate option.
  auto *IsolateOption = isl_map_wrap(IsolateRelation);
  auto *Id = isl_id_alloc(isl_set_get_ctx(IsolateOption), "isolate", nullptr);
  return isl_union_set_from_set(isl_set_set_tuple_id(IsolateOption, Id));
}
262 
263 /// Create an isl_union_set, which describes the atomic option for the dimension
264 /// of the current node.
265 ///
266 /// It may help to reduce the size of generated code.
267 ///
268 /// @param Ctx An isl_ctx, which is used to create the isl_union_set.
269 static __isl_give isl_union_set *getAtomicOptions(isl_ctx *Ctx) {
270   auto *Space = isl_space_set_alloc(Ctx, 0, 1);
271   auto *AtomicOption = isl_set_universe(Space);
272   auto *Id = isl_id_alloc(Ctx, "atomic", nullptr);
273   return isl_union_set_from_set(isl_set_set_tuple_id(AtomicOption, Id));
274 }
275 
276 /// Create an isl_union_set, which describes the option of the form
277 /// [isolate[] -> unroll[x]].
278 ///
279 /// @param Ctx An isl_ctx, which is used to create the isl_union_set.
280 static __isl_give isl_union_set *getUnrollIsolatedSetOptions(isl_ctx *Ctx) {
281   auto *Space = isl_space_alloc(Ctx, 0, 0, 1);
282   auto *UnrollIsolatedSetOption = isl_map_universe(Space);
283   auto *DimInId = isl_id_alloc(Ctx, "isolate", nullptr);
284   auto *DimOutId = isl_id_alloc(Ctx, "unroll", nullptr);
285   UnrollIsolatedSetOption =
286       isl_map_set_tuple_id(UnrollIsolatedSetOption, isl_dim_in, DimInId);
287   UnrollIsolatedSetOption =
288       isl_map_set_tuple_id(UnrollIsolatedSetOption, isl_dim_out, DimOutId);
289   return isl_union_set_from_set(isl_map_wrap(UnrollIsolatedSetOption));
290 }
291 
292 /// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
293 ///
294 /// @param Set         A set, which should be modified.
295 /// @param VectorWidth A parameter, which determines the constraint.
296 static __isl_give isl_set *addExtentConstraints(__isl_take isl_set *Set,
297                                                 int VectorWidth) {
298   auto Dims = isl_set_dim(Set, isl_dim_set);
299   auto Space = isl_set_get_space(Set);
300   auto *LocalSpace = isl_local_space_from_space(Space);
301   auto *ExtConstr =
302       isl_constraint_alloc_inequality(isl_local_space_copy(LocalSpace));
303   ExtConstr = isl_constraint_set_constant_si(ExtConstr, 0);
304   ExtConstr =
305       isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, 1);
306   Set = isl_set_add_constraint(Set, ExtConstr);
307   ExtConstr = isl_constraint_alloc_inequality(LocalSpace);
308   ExtConstr = isl_constraint_set_constant_si(ExtConstr, VectorWidth - 1);
309   ExtConstr =
310       isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, -1);
311   return isl_set_add_constraint(Set, ExtConstr);
312 }
313 
314 /// Build the desired set of partial tile prefixes.
315 ///
316 /// We build a set of partial tile prefixes, which are prefixes of the vector
317 /// loop that have exactly VectorWidth iterations.
318 ///
319 /// 1. Get all prefixes of the vector loop.
320 /// 2. Extend it to a set, which has exactly VectorWidth iterations for
321 ///    any prefix from the set that was built on the previous step.
322 /// 3. Subtract loop domain from it, project out the vector loop dimension and
323 ///    get a set of prefixes, which don't have exactly VectorWidth iterations.
324 /// 4. Subtract it from all prefixes of the vector loop and get the desired
325 ///    set.
326 ///
327 /// @param ScheduleRange A range of a map, which describes a prefix schedule
328 ///                      relation.
329 static __isl_give isl_set *
330 getPartialTilePrefixes(__isl_take isl_set *ScheduleRange, int VectorWidth) {
331   auto Dims = isl_set_dim(ScheduleRange, isl_dim_set);
332   auto *LoopPrefixes = isl_set_project_out(isl_set_copy(ScheduleRange),
333                                            isl_dim_set, Dims - 1, 1);
334   auto *ExtentPrefixes =
335       isl_set_add_dims(isl_set_copy(LoopPrefixes), isl_dim_set, 1);
336   ExtentPrefixes = addExtentConstraints(ExtentPrefixes, VectorWidth);
337   auto *BadPrefixes = isl_set_subtract(ExtentPrefixes, ScheduleRange);
338   BadPrefixes = isl_set_project_out(BadPrefixes, isl_dim_set, Dims - 1, 1);
339   return isl_set_subtract(LoopPrefixes, BadPrefixes);
340 }
341 
342 __isl_give isl_schedule_node *ScheduleTreeOptimizer::isolateFullPartialTiles(
343     __isl_take isl_schedule_node *Node, int VectorWidth) {
344   assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);
345   Node = isl_schedule_node_child(Node, 0);
346   Node = isl_schedule_node_child(Node, 0);
347   auto *SchedRelUMap = isl_schedule_node_get_prefix_schedule_relation(Node);
348   auto *ScheduleRelation = isl_map_from_union_map(SchedRelUMap);
349   auto *ScheduleRange = isl_map_range(ScheduleRelation);
350   auto *IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
351   auto *AtomicOption = getAtomicOptions(isl_set_get_ctx(IsolateDomain));
352   auto *IsolateOption = getIsolateOptions(IsolateDomain, 1);
353   Node = isl_schedule_node_parent(Node);
354   Node = isl_schedule_node_parent(Node);
355   auto *Options = isl_union_set_union(IsolateOption, AtomicOption);
356   Node = isl_schedule_node_band_set_ast_build_options(Node, Options);
357   return Node;
358 }
359 
/// Prevectorize the band member @p DimToVectorize of @p Node.
///
/// The chosen dimension is split into a band of its own, strip-mined by
/// @p VectorWidth, full/partial vector tiles are isolated, and the point
/// band is sunk to the innermost position and annotated with a "SIMD" mark
/// node for the code generator.
///
/// @param Node           The band node to transform.
/// @param DimToVectorize The index of the band member to strip-mine.
/// @param VectorWidth    The number of iterations to strip-mine.
/// @return               The transformed schedule tree.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::prevectSchedBand(__isl_take isl_schedule_node *Node,
                                        unsigned DimToVectorize,
                                        int VectorWidth) {
  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);

  auto Space = isl_schedule_node_band_get_space(Node);
  auto ScheduleDimensions = isl_space_dim(Space, isl_dim_set);
  isl_space_free(Space);
  assert(DimToVectorize < ScheduleDimensions);

  // Split the band so DimToVectorize ends up in a single-member band:
  // first split off the preceding dimensions, then the following ones.
  if (DimToVectorize > 0) {
    Node = isl_schedule_node_band_split(Node, DimToVectorize);
    Node = isl_schedule_node_child(Node, 0);
  }
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl_schedule_node_band_split(Node, 1);
  // Strip-mine the single remaining dimension by VectorWidth.
  Space = isl_schedule_node_band_get_space(Node);
  auto Sizes = isl_multi_val_zero(Space);
  auto Ctx = isl_schedule_node_get_ctx(Node);
  Sizes =
      isl_multi_val_set_val(Sizes, 0, isl_val_int_from_si(Ctx, VectorWidth));
  Node = isl_schedule_node_band_tile(Node, Sizes);
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = isl_schedule_node_child(Node, 0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = isl_schedule_node_band_set_ast_build_options(
      Node, isl_union_set_read_from_str(Ctx, "{ unroll[x]: 1 = 0 }"));
  // Sink the point band to the innermost position and mark it with "SIMD".
  Node = isl_schedule_node_band_sink(Node);
  Node = isl_schedule_node_child(Node, 0);
  if (isl_schedule_node_get_type(Node) == isl_schedule_node_leaf)
    Node = isl_schedule_node_parent(Node);
  isl_id *LoopMarker = isl_id_alloc(Ctx, "SIMD", nullptr);
  Node = isl_schedule_node_insert_mark(Node, LoopMarker);
  return Node;
}
397 
398 __isl_give isl_schedule_node *
399 ScheduleTreeOptimizer::tileNode(__isl_take isl_schedule_node *Node,
400                                 const char *Identifier, ArrayRef<int> TileSizes,
401                                 int DefaultTileSize) {
402   auto Ctx = isl_schedule_node_get_ctx(Node);
403   auto Space = isl_schedule_node_band_get_space(Node);
404   auto Dims = isl_space_dim(Space, isl_dim_set);
405   auto Sizes = isl_multi_val_zero(Space);
406   std::string IdentifierString(Identifier);
407   for (unsigned i = 0; i < Dims; i++) {
408     auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
409     Sizes = isl_multi_val_set_val(Sizes, i, isl_val_int_from_si(Ctx, tileSize));
410   }
411   auto TileLoopMarkerStr = IdentifierString + " - Tiles";
412   isl_id *TileLoopMarker =
413       isl_id_alloc(Ctx, TileLoopMarkerStr.c_str(), nullptr);
414   Node = isl_schedule_node_insert_mark(Node, TileLoopMarker);
415   Node = isl_schedule_node_child(Node, 0);
416   Node = isl_schedule_node_band_tile(Node, Sizes);
417   Node = isl_schedule_node_child(Node, 0);
418   auto PointLoopMarkerStr = IdentifierString + " - Points";
419   isl_id *PointLoopMarker =
420       isl_id_alloc(Ctx, PointLoopMarkerStr.c_str(), nullptr);
421   Node = isl_schedule_node_insert_mark(Node, PointLoopMarker);
422   Node = isl_schedule_node_child(Node, 0);
423   return Node;
424 }
425 
426 __isl_give isl_schedule_node *
427 ScheduleTreeOptimizer::applyRegisterTiling(__isl_take isl_schedule_node *Node,
428                                            llvm::ArrayRef<int> TileSizes,
429                                            int DefaultTileSize) {
430   auto *Ctx = isl_schedule_node_get_ctx(Node);
431   Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
432   Node = isl_schedule_node_band_set_ast_build_options(
433       Node, isl_union_set_read_from_str(Ctx, "{unroll[x]}"));
434   return Node;
435 }
436 
437 namespace {
438 bool isSimpleInnermostBand(const isl::schedule_node &Node) {
439   assert(isl_schedule_node_get_type(Node.keep()) == isl_schedule_node_band);
440   assert(isl_schedule_node_n_children(Node.keep()) == 1);
441 
442   auto ChildType = isl_schedule_node_get_type(Node.child(0).keep());
443 
444   if (ChildType == isl_schedule_node_leaf)
445     return true;
446 
447   if (ChildType != isl_schedule_node_sequence)
448     return false;
449 
450   auto Sequence = Node.child(0);
451 
452   for (int c = 0, nc = isl_schedule_node_n_children(Sequence.keep()); c < nc;
453        ++c) {
454     auto Child = Sequence.child(c);
455     if (isl_schedule_node_get_type(Child.keep()) != isl_schedule_node_filter)
456       return false;
457     if (isl_schedule_node_get_type(Child.child(0).keep()) !=
458         isl_schedule_node_leaf)
459       return false;
460   }
461   return true;
462 }
463 } // namespace
464 
465 bool ScheduleTreeOptimizer::isTileableBandNode(
466     __isl_keep isl_schedule_node *Node) {
467   if (isl_schedule_node_get_type(Node) != isl_schedule_node_band)
468     return false;
469 
470   if (isl_schedule_node_n_children(Node) != 1)
471     return false;
472 
473   if (!isl_schedule_node_band_get_permutable(Node))
474     return false;
475 
476   auto Space = isl_schedule_node_band_get_space(Node);
477   auto Dims = isl_space_dim(Space, isl_dim_set);
478   isl_space_free(Space);
479 
480   if (Dims <= 1)
481     return false;
482 
483   auto ManagedNode = isl::manage(isl_schedule_node_copy(Node));
484   return isSimpleInnermostBand(ManagedNode);
485 }
486 
487 __isl_give isl_schedule_node *
488 ScheduleTreeOptimizer::standardBandOpts(__isl_take isl_schedule_node *Node,
489                                         void *User) {
490   if (FirstLevelTiling)
491     Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
492                     FirstLevelDefaultTileSize);
493 
494   if (SecondLevelTiling)
495     Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
496                     SecondLevelDefaultTileSize);
497 
498   if (RegisterTiling)
499     Node =
500         applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
501 
502   if (PollyVectorizerChoice == VECTORIZER_NONE)
503     return Node;
504 
505   auto Space = isl_schedule_node_band_get_space(Node);
506   auto Dims = isl_space_dim(Space, isl_dim_set);
507   isl_space_free(Space);
508 
509   for (int i = Dims - 1; i >= 0; i--)
510     if (isl_schedule_node_band_member_get_coincident(Node, i)) {
511       Node = prevectSchedBand(Node, i, PrevectorWidth);
512       break;
513     }
514 
515   return Node;
516 }
517 
518 /// Get the position of a dimension with a non-zero coefficient.
519 ///
520 /// Check that isl constraint @p Constraint has only one non-zero
521 /// coefficient for dimensions that have type @p DimType. If this is true,
522 /// return the position of the dimension corresponding to the non-zero
523 /// coefficient and negative value, otherwise.
524 ///
525 /// @param Constraint The isl constraint to be checked.
526 /// @param DimType    The type of the dimensions.
527 /// @return           The position of the dimension in case the isl
528 ///                   constraint satisfies the requirements, a negative
529 ///                   value, otherwise.
530 static int getMatMulConstraintDim(__isl_keep isl_constraint *Constraint,
531                                   enum isl_dim_type DimType) {
532   int DimPos = -1;
533   auto *LocalSpace = isl_constraint_get_local_space(Constraint);
534   int LocalSpaceDimNum = isl_local_space_dim(LocalSpace, DimType);
535   for (int i = 0; i < LocalSpaceDimNum; i++) {
536     auto *Val = isl_constraint_get_coefficient_val(Constraint, DimType, i);
537     if (isl_val_is_zero(Val)) {
538       isl_val_free(Val);
539       continue;
540     }
541     if (DimPos >= 0 || (DimType == isl_dim_out && !isl_val_is_one(Val)) ||
542         (DimType == isl_dim_in && !isl_val_is_negone(Val))) {
543       isl_val_free(Val);
544       isl_local_space_free(LocalSpace);
545       return -1;
546     }
547     DimPos = i;
548     isl_val_free(Val);
549   }
550   isl_local_space_free(LocalSpace);
551   return DimPos;
552 }
553 
554 /// Check the form of the isl constraint.
555 ///
556 /// Check that the @p DimInPos input dimension of the isl constraint
557 /// @p Constraint has a coefficient that is equal to negative one, the @p
558 /// DimOutPos has a coefficient that is equal to one and others
559 /// have coefficients equal to zero.
560 ///
561 /// @param Constraint The isl constraint to be checked.
562 /// @param DimInPos   The input dimension of the isl constraint.
563 /// @param DimOutPos  The output dimension of the isl constraint.
564 /// @return           isl_stat_ok in case the isl constraint satisfies
565 ///                   the requirements, isl_stat_error otherwise.
566 static isl_stat isMatMulOperandConstraint(__isl_keep isl_constraint *Constraint,
567                                           int &DimInPos, int &DimOutPos) {
568   auto *Val = isl_constraint_get_constant_val(Constraint);
569   if (!isl_constraint_is_equality(Constraint) || !isl_val_is_zero(Val)) {
570     isl_val_free(Val);
571     return isl_stat_error;
572   }
573   isl_val_free(Val);
574   DimInPos = getMatMulConstraintDim(Constraint, isl_dim_in);
575   if (DimInPos < 0)
576     return isl_stat_error;
577   DimOutPos = getMatMulConstraintDim(Constraint, isl_dim_out);
578   if (DimOutPos < 0)
579     return isl_stat_error;
580   return isl_stat_ok;
581 }
582 
/// Check that the access relation corresponds to a non-constant operand
/// of the matrix multiplication.
///
/// Access relations that correspond to non-constant operands of the matrix
/// multiplication depend only on two input dimensions and have two output
/// dimensions. The function checks that the isl basic map @p bmap satisfies
/// the requirements. The two input dimensions can be specified via @p user
/// array.
///
/// @param bmap The isl basic map to be checked.
/// @param user The input dimensions of @p bmap.
/// @return     isl_stat_ok in case isl basic map satisfies the requirements,
///             isl_stat_error otherwise.
static isl_stat isMatMulOperandBasicMap(__isl_take isl_basic_map *bmap,
                                        void *user) {
  auto *Constraints = isl_basic_map_get_constraint_list(bmap);
  isl_basic_map_free(bmap);
  // Exactly two constraints are expected, one for each output dimension.
  if (isl_constraint_list_n_constraint(Constraints) != 2) {
    isl_constraint_list_free(Constraints);
    return isl_stat_error;
  }
  // If the caller passed no positions, fall back to a local scratch pair so
  // the loop below can still record and cross-check them.
  int InPosPair[] = {-1, -1};
  auto DimInPos = user ? static_cast<int *>(user) : InPosPair;
  for (int i = 0; i < 2; i++) {
    auto *Constraint = isl_constraint_list_get_constraint(Constraints, i);
    int InPos, OutPos;
    // Reject the basic map if the constraint is malformed, references an
    // output dimension other than 0 or 1, or disagrees with a previously
    // recorded input position for the same output dimension.
    if (isMatMulOperandConstraint(Constraint, InPos, OutPos) ==
            isl_stat_error ||
        OutPos > 1 || (DimInPos[OutPos] >= 0 && DimInPos[OutPos] != InPos)) {
      isl_constraint_free(Constraint);
      isl_constraint_list_free(Constraints);
      return isl_stat_error;
    }
    DimInPos[OutPos] = InPos;
    isl_constraint_free(Constraint);
  }
  isl_constraint_list_free(Constraints);
  return isl_stat_ok;
}
622 
/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
__isl_give isl_map *permuteDimensions(__isl_take isl_map *Map,
                                      enum isl_dim_type DimType,
                                      unsigned DstPos, unsigned SrcPos) {
  assert(DstPos < isl_map_dim(Map, DimType) &&
         SrcPos < isl_map_dim(Map, DimType));
  if (DstPos == SrcPos)
    return Map;
  // Save the tuple ids of both tuples; they are restored after the dimension
  // shuffle below.
  isl_id *DimId = nullptr;
  if (isl_map_has_tuple_id(Map, DimType))
    DimId = isl_map_get_tuple_id(Map, DimType);
  auto FreeDim = DimType == isl_dim_in ? isl_dim_out : isl_dim_in;
  isl_id *FreeDimId = nullptr;
  if (isl_map_has_tuple_id(Map, FreeDim))
    FreeDimId = isl_map_get_tuple_id(Map, FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  // Use the opposite tuple as scratch space: park both dimensions there
  // (larger index first, so the smaller index stays valid), then move them
  // back in swapped order.
  Map = isl_map_move_dims(Map, FreeDim, 0, DimType, MaxDim, 1);
  Map = isl_map_move_dims(Map, FreeDim, 0, DimType, MinDim, 1);
  Map = isl_map_move_dims(Map, DimType, MinDim, FreeDim, 1, 1);
  Map = isl_map_move_dims(Map, DimType, MaxDim, FreeDim, 0, 1);
  if (DimId)
    Map = isl_map_set_tuple_id(Map, DimType, DimId);
  if (FreeDimId)
    Map = isl_map_set_tuple_id(Map, FreeDim, FreeDimId);
  return Map;
}
659 
660 /// Check the form of the access relation.
661 ///
662 /// Check that the access relation @p AccMap has the form M[i][j], where i
663 /// is a @p FirstPos and j is a @p SecondPos.
664 ///
665 /// @param AccMap    The access relation to be checked.
666 /// @param FirstPos  The index of the input dimension that is mapped to
667 ///                  the first output dimension.
668 /// @param SecondPos The index of the input dimension that is mapped to the
669 ///                  second output dimension.
670 /// @return          True in case @p AccMap has the expected form and false,
671 ///                  otherwise.
672 static bool isMatMulOperandAcc(__isl_keep isl_map *AccMap, int &FirstPos,
673                                int &SecondPos) {
674   int DimInPos[] = {FirstPos, SecondPos};
675   if (isl_map_foreach_basic_map(AccMap, isMatMulOperandBasicMap,
676                                 static_cast<void *>(DimInPos)) != isl_stat_ok ||
677       DimInPos[0] < 0 || DimInPos[1] < 0)
678     return false;
679   FirstPos = DimInPos[0];
680   SecondPos = DimInPos[1];
681   return true;
682 }
683 
/// Does the memory access represent a non-scalar operand of the matrix
/// multiplication.
///
/// Check that the memory access @p MemAccess is the read access to a non-scalar
/// operand of the matrix multiplication or its result.
///
/// @param MemAccess The memory access to be checked.
/// @param MMI       Parameters of the matrix multiplication operands.
/// @return          True in case the memory access represents the read access
///                  to a non-scalar operand of the matrix multiplication and
///                  false, otherwise.
///
/// NOTE(review): isMatMulOperandAcc writes back into the index fields of
/// @p MMI (i/j/k) when it matches, so the order of the checks below is
/// significant - do not reorder them.
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
                                        MatMulInfoTy &MMI) {
  if (!MemAccess->isArrayKind() || !MemAccess->isRead())
    return false;
  isl_map *AccMap = MemAccess->getAccessRelation();
  // C[i][j]: read of the result matrix.
  if (isMatMulOperandAcc(AccMap, MMI.i, MMI.j) && !MMI.ReadFromC &&
      isl_map_n_basic_map(AccMap) == 1) {
    MMI.ReadFromC = MemAccess;
    isl_map_free(AccMap);
    return true;
  }
  // A[i][k]: first operand.
  if (isMatMulOperandAcc(AccMap, MMI.i, MMI.k) && !MMI.A &&
      isl_map_n_basic_map(AccMap) == 1) {
    MMI.A = MemAccess;
    isl_map_free(AccMap);
    return true;
  }
  // B[k][j]: second operand.
  if (isMatMulOperandAcc(AccMap, MMI.k, MMI.j) && !MMI.B &&
      isl_map_n_basic_map(AccMap) == 1) {
    MMI.B = MemAccess;
    isl_map_free(AccMap);
    return true;
  }
  isl_map_free(AccMap);
  return false;
}
721 
/// Check accesses to operands of the matrix multiplication.
///
/// Check that accesses of the SCoP statement, which corresponds to
/// the partial schedule @p PartialSchedule, are scalar in terms of loops
/// containing the matrix multiplication, in case they do not represent
/// accesses to the non-scalar operands of the matrix multiplication or
/// its result.
///
/// @param  PartialSchedule The partial schedule of the SCoP statement.
/// @param  MMI             Parameters of the matrix multiplication operands.
/// @return                 True in case the corresponding SCoP statement
///                         represents matrix multiplication and false,
///                         otherwise.
static bool containsOnlyMatrMultAcc(__isl_keep isl_map *PartialSchedule,
                                    MatMulInfoTy &MMI) {
  // The user pointer of the input tuple id carries the ScopStmt this partial
  // schedule belongs to.
  auto *InputDimId = isl_map_get_tuple_id(PartialSchedule, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimId));
  isl_id_free(InputDimId);
  unsigned OutDimNum = isl_map_dim(PartialSchedule, isl_dim_out);
  assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  // Build three variants of the schedule in which the i, j and k dimensions,
  // respectively, are moved to the innermost position, so that isStrideZero
  // can be queried with respect to each of those loops.
  auto *MapI = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.i, OutDimNum - 1);
  auto *MapJ = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.j, OutDimNum - 1);
  auto *MapK = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.k, OutDimNum - 1);
  // NOTE(review): the loop stops at end() - 1, i.e. the last memory access is
  // never inspected here — presumably because it is the write to C that was
  // matched beforehand; TODO confirm against containsMatrMult.
  for (auto *MemA = Stmt->begin(); MemA != Stmt->end() - 1; MemA++) {
    auto *MemAccessPtr = *MemA;
    // Any array access other than the write to C and the reads of A, B and C
    // must have stride zero with respect to each of the i, j and k loops.
    if (MemAccessPtr->isArrayKind() && MemAccessPtr != MMI.WriteToC &&
        !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
        !(MemAccessPtr->isStrideZero(isl_map_copy(MapI)) &&
          MemAccessPtr->isStrideZero(isl_map_copy(MapJ)) &&
          MemAccessPtr->isStrideZero(isl_map_copy(MapK)))) {
      isl_map_free(MapI);
      isl_map_free(MapJ);
      isl_map_free(MapK);
      return false;
    }
  }
  isl_map_free(MapI);
  isl_map_free(MapJ);
  isl_map_free(MapK);
  return true;
}
768 
/// Check for dependencies corresponding to the matrix multiplication.
///
/// Check that there is only true dependence of the form
/// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
/// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
/// to the dependency produced by the matrix multiplication.
///
/// @param  Schedule The schedule of the SCoP statement.
/// @param  D The SCoP dependencies.
/// @param  Pos The parameter to describe an acceptable true dependence.
///             In case it has a negative value, try to determine its
///             acceptable value.
/// @return True in case dependencies correspond to the matrix multiplication
///         and false, otherwise.
static bool containsOnlyMatMulDep(__isl_keep isl_map *Schedule,
                                  const Dependences *D, int &Pos) {
  // Consider RAW dependences together with reduction dependences, since the
  // update of C in a matrix multiplication may be modeled as a reduction.
  auto *Dep = D->getDependences(Dependences::TYPE_RAW);
  auto *Red = D->getDependences(Dependences::TYPE_RED);
  if (Red)
    Dep = isl_union_map_union(Dep, Red);
  auto *DomainSpace = isl_space_domain(isl_map_get_space(Schedule));
  auto *Space = isl_space_map_from_domain_and_range(isl_space_copy(DomainSpace),
                                                    DomainSpace);
  // Distance vectors of the statement's self-dependences.
  auto *Deltas = isl_map_deltas(isl_union_map_extract_map(Dep, Space));
  isl_union_map_free(Dep);
  int DeltasDimNum = isl_set_dim(Deltas, isl_dim_set);
  for (int i = 0; i < DeltasDimNum; i++) {
    auto *Val = isl_set_plain_get_val_if_fixed(Deltas, isl_dim_set, i);
    // If Pos was not provided by the caller, adopt the first dimension whose
    // fixed distance is one.
    Pos = Pos < 0 && isl_val_is_one(Val) ? i : Pos;
    // Every dimension must have a fixed distance of zero, except dimension
    // Pos whose distance must be exactly one. A NaN result means the
    // distance is not fixed at all.
    if (isl_val_is_nan(Val) ||
        !(isl_val_is_zero(Val) || (i == Pos && isl_val_is_one(Val)))) {
      isl_val_free(Val);
      isl_set_free(Deltas);
      return false;
    }
    isl_val_free(Val);
  }
  isl_set_free(Deltas);
  // Reject if there were no deltas at all or no carrying dimension was found.
  if (DeltasDimNum == 0 || Pos < 0)
    return false;
  return true;
}
811 
/// Check if the SCoP statement could probably be optimized with analytical
/// modeling.
///
/// containsMatrMult tries to determine whether the following conditions
/// are true:
/// 1. The last memory access modeling an array, MA1, represents writing to
///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
///    under consideration.
/// 2. There is only one loop-carried true dependency, and it has the
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
///    other loop-carried or anti dependencies.
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
///    and all memory accesses of the SCoP that are different from MA1, MA2,
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
///    of loops i1, i2 and i3.
///
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
///        to check.
/// @param D   The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return True if the statement matches the matrix multiplication pattern
///         and MMI was filled in; false otherwise.
static bool containsMatrMult(__isl_keep isl_map *PartialSchedule,
                             const Dependences *D, MatMulInfoTy &MMI) {
  // The user pointer of the input tuple id carries the ScopStmt this partial
  // schedule belongs to.
  auto *InputDimsId = isl_map_get_tuple_id(PartialSchedule, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimsId));
  isl_id_free(InputDimsId);
  if (Stmt->size() <= 1)
    return false;
  // Walk the accesses backwards looking for the last array access; it must
  // be a single-basic-map write of the form M(i, j) — the store into the
  // result matrix C. This also determines MMI.i and MMI.j.
  // NOTE(review): the loop stops before Stmt->begin(), so the very first
  // access is never inspected — TODO confirm this is intended.
  for (auto *MemA = Stmt->end() - 1; MemA != Stmt->begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto *AccMap = MemAccessPtr->getAccessRelation();
    if (isl_map_n_basic_map(AccMap) != 1 ||
        !isMatMulOperandAcc(AccMap, MMI.i, MMI.j)) {
      isl_map_free(AccMap);
      return false;
    }
    isl_map_free(AccMap);
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // Determine the reduction dimension k from the loop-carried dependence.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // The remaining accesses must read the matmul operands (filling MMI.A,
  // MMI.B and MMI.ReadFromC) or be scalar with respect to i, j and k.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}
869 
870 /// Permute two dimensions of the band node.
871 ///
872 /// Permute FirstDim and SecondDim dimensions of the Node.
873 ///
874 /// @param Node The band node to be modified.
875 /// @param FirstDim The first dimension to be permuted.
876 /// @param SecondDim The second dimension to be permuted.
877 static __isl_give isl_schedule_node *
878 permuteBandNodeDimensions(__isl_take isl_schedule_node *Node, unsigned FirstDim,
879                           unsigned SecondDim) {
880   assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band &&
881          isl_schedule_node_band_n_member(Node) > std::max(FirstDim, SecondDim));
882   auto PartialSchedule = isl_schedule_node_band_get_partial_schedule(Node);
883   auto PartialScheduleFirstDim =
884       isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, FirstDim);
885   auto PartialScheduleSecondDim =
886       isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, SecondDim);
887   PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff(
888       PartialSchedule, SecondDim, PartialScheduleFirstDim);
889   PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff(
890       PartialSchedule, FirstDim, PartialScheduleSecondDim);
891   Node = isl_schedule_node_delete(Node);
892   Node = isl_schedule_node_insert_partial_schedule(Node, PartialSchedule);
893   return Node;
894 }
895 
896 __isl_give isl_schedule_node *ScheduleTreeOptimizer::createMicroKernel(
897     __isl_take isl_schedule_node *Node, MicroKernelParamsTy MicroKernelParams) {
898   applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr}, 1);
899   Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
900   Node = permuteBandNodeDimensions(Node, 0, 1);
901   return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
902 }
903 
904 __isl_give isl_schedule_node *ScheduleTreeOptimizer::createMacroKernel(
905     __isl_take isl_schedule_node *Node, MacroKernelParamsTy MacroKernelParams) {
906   assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);
907   if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
908       MacroKernelParams.Kc == 1)
909     return Node;
910   int DimOutNum = isl_schedule_node_band_n_member(Node);
911   std::vector<int> TileSizes(DimOutNum, 1);
912   TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
913   TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
914   TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
915   Node = tileNode(Node, "1st level tiling", TileSizes, 1);
916   Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
917   Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
918   Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
919   return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
920 }
921 
922 /// Get the size of the widest type of the matrix multiplication operands
923 /// in bytes, including alignment padding.
924 ///
925 /// @param MMI Parameters of the matrix multiplication operands.
926 /// @return The size of the widest type of the matrix multiplication operands
927 ///         in bytes, including alignment padding.
928 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) {
929   auto *S = MMI.A->getStatement()->getParent();
930   auto &DL = S->getFunction().getParent()->getDataLayout();
931   auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
932   auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
933   auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
934   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
935 }
936 
937 /// Get the size of the widest type of the matrix multiplication operands
938 /// in bits.
939 ///
940 /// @param MMI Parameters of the matrix multiplication operands.
941 /// @return The size of the widest type of the matrix multiplication operands
942 ///         in bits.
943 static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
944   auto *S = MMI.A->getStatement()->getParent();
945   auto &DL = S->getFunction().getParent()->getDataLayout();
946   auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
947   auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
948   auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
949   return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
950 }
951 
/// Get parameters of the BLIS micro kernel.
///
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
/// such that no stalls caused by the combination of latencies and dependencies
/// are introduced during the updates of the resulting matrix of the matrix
/// multiplication. However, they should also be as small as possible to
/// release more registers for entries of multiplied matrices.
///
/// @param TTI Target Transform Info.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MicroKernelParamsTy.
/// @see MicroKernelParamsTy
static struct MicroKernelParamsTy
getMicroKernelParams(const llvm::TargetTransformInfo *TTI, MatMulInfoTy MMI) {
  assert(TTI && "The target transform info should be provided.");

  // Nvec - Number of double-precision floating-point numbers that can be held
  // by a vector register. Use 2 by default.
  long RegisterBitwidth = VectorRegisterBitwidth;

  // A value of -1 means the width was not set explicitly; fall back to the
  // vector register width reported by the target.
  if (RegisterBitwidth == -1)
    RegisterBitwidth = TTI->getRegisterBitWidth(true);
  auto ElementSize = getMatMulTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  auto Nvec = RegisterBitwidth / ElementSize;
  if (Nvec == 0)
    Nvec = 2;
  // Choose Nr as the smallest multiple of the vector width whose square
  // covers the latency-throughput product, and derive Mr from it
  // (cf. the BLIS analytical model).
  int Nr =
      ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
  int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
  return {Mr, Nr};
}
985 
/// Get parameters of the BLIS macro kernel.
///
/// During the computation of matrix multiplication, blocks of partitioned
/// matrices are mapped to different layers of the memory hierarchy.
/// To optimize data reuse, blocks should be ideally kept in cache between
/// iterations. Since parameters of the macro kernel determine sizes of these
/// blocks, there are upper and lower bounds on these parameters.
///
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MacroKernelParamsTy; {1, 1, 1} when the
///         required cache information is unavailable or invalid.
/// @see MacroKernelParamsTy
/// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car and Cac follow the analytical model of the paper referenced above;
  // they bound how much of the first- and second-level caches the blocks of
  // A may occupy.
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is floor to int.
  // On Mac OS, division by 0 does not raise a signal. This causes negative
  // tile sizes to be computed. Prevent division by 0 Cac by early returning
  // if this happens.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  // Kc: bounded by the first-level cache capacity available for the panel
  // of A (Mr rows, element-size-scaled).
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  // Cac: fraction of a second-level cache set occupied by a Kc-deep panel.
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  // Nc is chosen as a user-tunable multiple of the micro-kernel's Nr.
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be  greater than zero");
  return {Mc, Nc, Kc};
}
1041 
1042 /// Create an access relation that is specific to
1043 ///        the matrix multiplication pattern.
1044 ///
1045 /// Create an access relation of the following form:
1046 /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
1047 /// where I is @p FirstDim, J is @p SecondDim.
1048 ///
1049 /// It can be used, for example, to create relations that helps to consequently
1050 /// access elements of operands of a matrix multiplication after creation of
1051 /// the BLIS micro and macro kernels.
1052 ///
1053 /// @see ScheduleTreeOptimizer::createMicroKernel
1054 /// @see ScheduleTreeOptimizer::createMacroKernel
1055 ///
1056 /// Subsequently, the described access relation is applied to the range of
1057 /// @p MapOldIndVar, that is used to map original induction variables to
1058 /// the ones, which are produced by schedule transformations. It helps to
1059 /// define relations using a new space and, at the same time, keep them
1060 /// in the original one.
1061 ///
1062 /// @param MapOldIndVar The relation, which maps original induction variables
1063 ///                     to the ones, which are produced by schedule
1064 ///                     transformations.
1065 /// @param FirstDim, SecondDim The input dimensions that are used to define
1066 ///        the specified access relation.
1067 /// @return The specified access relation.
1068 __isl_give isl_map *getMatMulAccRel(__isl_take isl_map *MapOldIndVar,
1069                                     unsigned FirstDim, unsigned SecondDim) {
1070   auto *Ctx = isl_map_get_ctx(MapOldIndVar);
1071   auto *AccessRelSpace = isl_space_alloc(Ctx, 0, 9, 3);
1072   auto *AccessRel = isl_map_universe(AccessRelSpace);
1073   AccessRel = isl_map_equate(AccessRel, isl_dim_in, FirstDim, isl_dim_out, 0);
1074   AccessRel = isl_map_equate(AccessRel, isl_dim_in, 5, isl_dim_out, 1);
1075   AccessRel = isl_map_equate(AccessRel, isl_dim_in, SecondDim, isl_dim_out, 2);
1076   return isl_map_apply_range(MapOldIndVar, AccessRel);
1077 }
1078 
1079 __isl_give isl_schedule_node *
1080 createExtensionNode(__isl_take isl_schedule_node *Node,
1081                     __isl_take isl_map *ExtensionMap) {
1082   auto *Extension = isl_union_map_from_map(ExtensionMap);
1083   auto *NewNode = isl_schedule_node_from_extension(Extension);
1084   return isl_schedule_node_graft_before(Node, NewNode);
1085 }
1086 
/// Apply the packing transformation.
///
/// The packing transformation can be described as a data-layout
/// transformation that requires to introduce a new array, copy data
/// to the array, and change memory access locations to reference the array.
/// It can be used to ensure that elements of the new array are read in-stride
/// access, aligned to cache lines boundaries, and preloaded into certain cache
/// levels.
///
/// As an example let us consider the packing of the array A that would help
/// to read its elements with in-stride access. An access to the array A
/// is represented by an access relation that has the form
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
/// k mod Kc, j mod Nr, i mod Mr].
///
/// To ensure that elements of the array A are read in-stride access, we add
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
/// Scop::createScopArrayInfo, change the access relation
/// S[i, j, k] -> A[i, k] to
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
/// the copy statement created by Scop::addScopStmt.
///
/// @param Node The schedule node to be optimized.
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
///                                 to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static __isl_give isl_schedule_node *optimizeDataLayoutMatrMulPattern(
    __isl_take isl_schedule_node *Node, __isl_take isl_map *MapOldIndVar,
    MicroKernelParamsTy MicroParams, MacroKernelParamsTy MacroParams,
    MatMulInfoTy &MMI) {
  // The user pointer of the input tuple id carries the ScopStmt this map
  // belongs to.
  auto InputDimsId = isl_map_get_tuple_id(MapOldIndVar, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimsId));
  isl_id_free(InputDimsId);

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // NOTE(review): the five parent steps climb from the micro-kernel band to
  // the level where the copy-in of B is placed — TODO confirm the level
  // count against the tree produced by createMacroKernel/createMicroKernel.
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  Node = isl_schedule_node_parent(Node);
  Node = isl_schedule_node_child(isl_schedule_node_band_split(Node, 2), 0);
  // Access Packed_B with dims 3 (Nc tile) and 7 (Nr remainder); dim 5 is Kc.
  auto *AccRel = getMatMulAccRel(isl_map_copy(MapOldIndVar), 3, 7);
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = isl_map_set_tuple_id(AccRel, isl_dim_out, SAI->getBasePtrId());
  // Keep the original access relation around: the copy statement reads from
  // the old location and writes to the packed array.
  auto *OldAcc = MMI.B->getAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  // Build the extension map that schedules the copy: keep only the two
  // outermost schedule dimensions and execute the copy once (i fixed to 0).
  auto *ExtMap =
      isl_map_project_out(isl_map_copy(MapOldIndVar), isl_dim_out, 2,
                          isl_map_dim(MapOldIndVar, isl_dim_out) - 2);
  ExtMap = isl_map_reverse(ExtMap);
  ExtMap = isl_map_fix_si(ExtMap, isl_dim_out, MMI.i, 0);
  auto *Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto *DomainId = isl_set_get_tuple_id(Domain);
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getAccessRelation(), isl_set_copy(Domain));
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, isl_id_copy(DomainId));
  ExtMap = isl_map_intersect_range(ExtMap, isl_set_copy(Domain));
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = isl_schedule_node_child(Node, 0);
  // Access Packed_A with dims 4 (Mc tile) and 6 (Mr remainder); dim 5 is Kc.
  AccRel = getMatMulAccRel(isl_map_copy(MapOldIndVar), 4, 6);
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = isl_map_set_tuple_id(AccRel, isl_dim_out, SAI->getBasePtrId());
  OldAcc = MMI.A->getAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  // The copy of A is scheduled under the three outermost dimensions and
  // executed once per iteration of them (j fixed to 0). MapOldIndVar is
  // consumed here — no isl_map_copy.
  ExtMap = isl_map_project_out(MapOldIndVar, isl_dim_out, 3,
                               isl_map_dim(MapOldIndVar, isl_dim_out) - 3);
  ExtMap = isl_map_reverse(ExtMap);
  ExtMap = isl_map_fix_si(ExtMap, isl_dim_out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(OldAcc, MMI.A->getAccessRelation(),
                                           isl_set_copy(Domain));

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed. DomainId and Domain are consumed by
  // these calls — no copies are taken this time.
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, DomainId);
  ExtMap = isl_map_intersect_range(ExtMap, Domain);
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  // Descend back to the position the caller expects (the micro-kernel band).
  Node = isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
  return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
}
1188 
1189 /// Get a relation mapping induction variables produced by schedule
1190 /// transformations to the original ones.
1191 ///
1192 /// @param Node The schedule node produced as the result of creation
1193 ///        of the BLIS kernels.
1194 /// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
1195 ///                                             to be taken into account.
1196 /// @return  The relation mapping original induction variables to the ones
1197 ///          produced by schedule transformation.
1198 /// @see ScheduleTreeOptimizer::createMicroKernel
1199 /// @see ScheduleTreeOptimizer::createMacroKernel
1200 /// @see getMacroKernelParams
1201 __isl_give isl_map *
1202 getInductionVariablesSubstitution(__isl_take isl_schedule_node *Node,
1203                                   MicroKernelParamsTy MicroKernelParams,
1204                                   MacroKernelParamsTy MacroKernelParams) {
1205   auto *Child = isl_schedule_node_get_child(Node, 0);
1206   auto *UnMapOldIndVar = isl_schedule_node_get_prefix_schedule_union_map(Child);
1207   isl_schedule_node_free(Child);
1208   auto *MapOldIndVar = isl_map_from_union_map(UnMapOldIndVar);
1209   if (isl_map_dim(MapOldIndVar, isl_dim_out) > 9)
1210     MapOldIndVar =
1211         isl_map_project_out(MapOldIndVar, isl_dim_out, 0,
1212                             isl_map_dim(MapOldIndVar, isl_dim_out) - 9);
1213   return MapOldIndVar;
1214 }
1215 
/// Isolate a set of partial tile prefixes and unroll the isolated part.
///
/// The set should ensure that it contains only partial tile prefixes that have
/// exactly Mr x Nr iterations of the two innermost loops produced by
/// the optimization of the matrix multiplication. Mr and Nr are parameters of
/// the micro-kernel.
///
/// In case of parametric bounds, this helps to auto-vectorize the unrolled
/// innermost loops, using the SLP vectorizer.
///
/// @param Node              The schedule node to be modified.
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @return The modified isl_schedule_node.
static __isl_give isl_schedule_node *
isolateAndUnrollMatMulInnerLoops(__isl_take isl_schedule_node *Node,
                                 struct MicroKernelParamsTy MicroKernelParams) {
  // Compute the prefix schedule relation of this band's child and take its
  // range: the schedule values reached so far.
  auto *Child = isl_schedule_node_get_child(Node, 0);
  auto *UnMapOldIndVar = isl_schedule_node_get_prefix_schedule_relation(Child);
  isl_schedule_node_free(Child);
  auto *Prefix = isl_map_range(isl_map_from_union_map(UnMapOldIndVar));
  auto Dims = isl_set_dim(Prefix, isl_dim_set);
  // Drop the innermost dimension, then keep only prefixes whose remaining
  // two innermost loops run full Nr and Mr iterations, respectively.
  Prefix = isl_set_project_out(Prefix, isl_dim_set, Dims - 1, 1);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);
  // Request: isolate full tiles, treat the remainder atomically, and unroll
  // the isolated part. IsolateOption consumes a padded copy of Prefix;
  // the original Prefix is reused for the outer band below.
  auto *IsolateOption = getIsolateOptions(
      isl_set_add_dims(isl_set_copy(Prefix), isl_dim_set, 3), 3);
  auto *Ctx = isl_schedule_node_get_ctx(Node);
  auto *AtomicOption = getAtomicOptions(Ctx);
  auto *Options =
      isl_union_set_union(IsolateOption, isl_union_set_copy(AtomicOption));
  Options = isl_union_set_union(Options, getUnrollIsolatedSetOptions(Ctx));
  Node = isl_schedule_node_band_set_ast_build_options(Node, Options);
  // Apply matching isolate/atomic options to the band two levels up; this
  // union consumes both Prefix (via IsolateOption) and AtomicOption.
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = isl_union_set_union(IsolateOption, AtomicOption);
  Node = isl_schedule_node_band_set_ast_build_options(Node, Options);
  // Return to the original band position.
  Node = isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
  return Node;
}
1256 
1257 /// Mark @p BasePtr with "Inter iteration alias-free" mark node.
1258 ///
1259 /// @param Node The child of the mark node to be inserted.
1260 /// @param BasePtr The pointer to be marked.
1261 /// @return The modified isl_schedule_node.
1262 static isl_schedule_node *markInterIterationAliasFree(isl_schedule_node *Node,
1263                                                       llvm::Value *BasePtr) {
1264   if (!BasePtr)
1265     return Node;
1266 
1267   auto *Ctx = isl_schedule_node_get_ctx(Node);
1268   auto *Id = isl_id_alloc(Ctx, "Inter iteration alias-free", BasePtr);
1269   return isl_schedule_node_child(isl_schedule_node_insert_mark(Node, Id), 0);
1270 }
1271 
/// Restore the initial ordering of dimensions of the band node
///
/// In case the band node represents all the dimensions of the iteration
/// domain, recreate the band node to restore the initial ordering of the
/// dimensions.
///
/// @param Node The band node to be modified.
/// @return The modified schedule node.
namespace {
isl::schedule_node getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
  assert(isl_schedule_node_get_type(Node.keep()) == isl_schedule_node_band);
  // Only rewrite bands whose single child is a leaf; otherwise deleting and
  // re-inserting the partial schedule would disturb the subtree.
  if (isl_schedule_node_get_type(Node.child(0).keep()) !=
      isl_schedule_node_leaf)
    return Node;
  auto Domain = isl::manage(isl_schedule_node_get_universe_domain(Node.keep()));
  assert(isl_union_set_n_set(Domain.keep()) == 1);
  // Bail out unless the band sits at the root of the schedule and covers
  // every dimension of the (single-set) iteration domain.
  if (isl_schedule_node_get_schedule_depth(Node.keep()) != 0 ||
      (isl::set(isl::manage(Domain.copy())).dim(isl::dim::set) !=
       isl_schedule_node_band_n_member(Node.keep())))
    return Node;
  // Replace the band by the identity schedule over the domain, which orders
  // the band members like the original iteration-domain dimensions.
  Node = isl::manage(isl_schedule_node_delete(Node.take()));
  auto PartialSchedulePwAff =
      isl::manage(isl_union_set_identity_union_pw_multi_aff(Domain.take()));
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  // Drop the set tuple id so the result is a plain band schedule.
  PartialScheduleMultiPwAff = isl::manage(isl_multi_union_pw_aff_reset_tuple_id(
      PartialScheduleMultiPwAff.take(), isl_dim_set));
  return isl::manage(isl_schedule_node_insert_partial_schedule(
      Node.take(), PartialScheduleMultiPwAff.take()));
}
} // namespace
1303 
// Rewrite the band node that was matched as a matrix multiplication into the
// BLIS-style kernel structure: permute the loops into (i, j, k) order, apply
// macro- and micro-kernel tiling, and (when tiling succeeded) isolate/unroll
// the inner loops and pack the operands.
__isl_give isl_schedule_node *ScheduleTreeOptimizer::optimizeMatMulPattern(
    __isl_take isl_schedule_node *Node, const llvm::TargetTransformInfo *TTI,
    MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  Node = markInterIterationAliasFree(
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
  int DimOutNum = isl_schedule_node_band_n_member(Node);
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(isl::manage(Node)).take();
  // Move dimension i to position DimOutNum - 3. The swap may have displaced
  // j or k; NewJ/NewK track their positions after each permutation.
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  // Skip isolation and packing when the macro-kernel tiling was a no-op in
  // any dimension (getMacroKernelParams returned a size of 1).
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto *MapOldIndVar = getInductionVariablesSubstitution(
      Node, MicroKernelParams, MacroKernelParams);
  if (!MapOldIndVar)
    return Node;
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}
1336 
1337 bool ScheduleTreeOptimizer::isMatrMultPattern(
1338     __isl_keep isl_schedule_node *Node, const Dependences *D,
1339     MatMulInfoTy &MMI) {
1340   auto *PartialSchedule =
1341       isl_schedule_node_band_get_partial_schedule_union_map(Node);
1342   Node = isl_schedule_node_child(Node, 0);
1343   auto LeafType = isl_schedule_node_get_type(Node);
1344   Node = isl_schedule_node_parent(Node);
1345   if (LeafType != isl_schedule_node_leaf ||
1346       isl_schedule_node_band_n_member(Node) < 3 ||
1347       isl_schedule_node_get_schedule_depth(Node) != 0 ||
1348       isl_union_map_n_map(PartialSchedule) != 1) {
1349     isl_union_map_free(PartialSchedule);
1350     return false;
1351   }
1352   auto *NewPartialSchedule = isl_map_from_union_map(PartialSchedule);
1353   if (containsMatrMult(NewPartialSchedule, D, MMI)) {
1354     isl_map_free(NewPartialSchedule);
1355     return true;
1356   }
1357   isl_map_free(NewPartialSchedule);
1358   return false;
1359 }
1360 
1361 __isl_give isl_schedule_node *
1362 ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
1363                                     void *User) {
1364   if (!isTileableBandNode(Node))
1365     return Node;
1366 
1367   const OptimizerAdditionalInfoTy *OAI =
1368       static_cast<const OptimizerAdditionalInfoTy *>(User);
1369 
1370   MatMulInfoTy MMI;
1371   if (PMBasedOpts && User && isMatrMultPattern(Node, OAI->D, MMI)) {
1372     DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
1373     return optimizeMatMulPattern(Node, OAI->TTI, MMI);
1374   }
1375 
1376   return standardBandOpts(Node, User);
1377 }
1378 
1379 __isl_give isl_schedule *
1380 ScheduleTreeOptimizer::optimizeSchedule(__isl_take isl_schedule *Schedule,
1381                                         const OptimizerAdditionalInfoTy *OAI) {
1382   isl_schedule_node *Root = isl_schedule_get_root(Schedule);
1383   Root = optimizeScheduleNode(Root, OAI);
1384   isl_schedule_free(Schedule);
1385   auto S = isl_schedule_node_get_schedule(Root);
1386   isl_schedule_node_free(Root);
1387   return S;
1388 }
1389 
1390 __isl_give isl_schedule_node *ScheduleTreeOptimizer::optimizeScheduleNode(
1391     __isl_take isl_schedule_node *Node, const OptimizerAdditionalInfoTy *OAI) {
1392   Node = isl_schedule_node_map_descendant_bottom_up(
1393       Node, optimizeBand, const_cast<void *>(static_cast<const void *>(OAI)));
1394   return Node;
1395 }
1396 
1397 bool ScheduleTreeOptimizer::isProfitableSchedule(
1398     Scop &S, __isl_keep isl_schedule *NewSchedule) {
1399   // To understand if the schedule has been optimized we check if the schedule
1400   // has changed at all.
1401   // TODO: We can improve this by tracking if any necessarily beneficial
1402   // transformations have been performed. This can e.g. be tiling, loop
1403   // interchange, or ...) We can track this either at the place where the
1404   // transformation has been performed or, in case of automatic ILP based
1405   // optimizations, by comparing (yet to be defined) performance metrics
1406   // before/after the scheduling optimizer
1407   // (e.g., #stride-one accesses)
1408   if (S.containsExtensionNode(NewSchedule))
1409     return true;
1410   auto *NewScheduleMap = isl_schedule_get_map(NewSchedule);
1411   isl_union_map *OldSchedule = S.getSchedule();
1412   assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1413                         "that make Scop::getSchedule() return nullptr.");
1414   bool changed = !isl_union_map_is_equal(OldSchedule, NewScheduleMap);
1415   isl_union_map_free(OldSchedule);
1416   isl_union_map_free(NewScheduleMap);
1417   return changed;
1418 }
1419 
1420 namespace {
1421 class IslScheduleOptimizer : public ScopPass {
1422 public:
1423   static char ID;
1424   explicit IslScheduleOptimizer() : ScopPass(ID) { LastSchedule = nullptr; }
1425 
1426   ~IslScheduleOptimizer() { isl_schedule_free(LastSchedule); }
1427 
1428   /// Optimize the schedule of the SCoP @p S.
1429   bool runOnScop(Scop &S) override;
1430 
1431   /// Print the new schedule for the SCoP @p S.
1432   void printScop(raw_ostream &OS, Scop &S) const override;
1433 
1434   /// Register all analyses and transformation required.
1435   void getAnalysisUsage(AnalysisUsage &AU) const override;
1436 
1437   /// Release the internal memory.
1438   void releaseMemory() override {
1439     isl_schedule_free(LastSchedule);
1440     LastSchedule = nullptr;
1441   }
1442 
1443 private:
1444   isl_schedule *LastSchedule;
1445 };
1446 } // namespace
1447 
char IslScheduleOptimizer::ID = 0; // Pass identification, replacement for typeid.
1449 
bool IslScheduleOptimizer::runOnScop(Scop &S) {

  // Skip empty SCoPs but still allow code generation as it will delete the
  // loops present but not needed.
  if (S.getSize() == 0) {
    S.markAsOptimized();
    return false;
  }

  // Statement-level dependences are the scheduling input; without valid
  // dependences no correct reordering can be derived.
  const Dependences &D =
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);

  if (!D.hasValidDependences())
    return false;

  // Drop any schedule kept from a previous run.
  // NOTE(review): LastSchedule is never re-assigned below, so printScop
  // always reports "n/a" — confirm whether caching the new schedule here was
  // intentionally dropped.
  isl_schedule_free(LastSchedule);
  LastSchedule = nullptr;

  // Build input data.
  // Validity (correctness) constraints always cover all dependence kinds;
  // proximity (locality) constraints depend on the OptimizeDeps option.
  int ValidityKinds =
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  int ProximityKinds;

  if (OptimizeDeps == "all")
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  else if (OptimizeDeps == "raw")
    ProximityKinds = Dependences::TYPE_RAW;
  else {
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
           << " Falling back to optimizing all dependences.\n";
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  }

  isl::union_set Domain = give(S.getDomains());

  // Without iteration domains there is nothing to schedule.
  if (!Domain)
    return false;

  isl::union_map Validity = give(D.getDependences(ValidityKinds));
  isl::union_map Proximity = give(D.getDependences(ProximityKinds));

  // Simplify the dependences by removing the constraints introduced by the
  // domains. This can speed up the scheduling time significantly, as large
  // constant coefficients will be removed from the dependences. The
  // introduction of some additional dependences reduces the possible
  // transformations, but in most cases, such transformation do not seem to be
  // interesting anyway. In some cases this option may stop the scheduler to
  // find any schedule.
  if (SimplifyDeps == "yes") {
    Validity = Validity.gist_domain(Domain);
    Validity = Validity.gist_range(Domain);
    Proximity = Proximity.gist_domain(Domain);
    Proximity = Proximity.gist_range(Domain);
  } else if (SimplifyDeps != "no") {
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
              "or 'no'. Falling back to default: 'yes'\n";
  }

  DEBUG(dbgs() << "\n\nCompute schedule from: ");
  DEBUG(dbgs() << "Domain := " << Domain << ";\n");
  DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
  DEBUG(dbgs() << "Validity := " << Validity << ";\n");

  // Translate the string-valued options into the integer values expected by
  // the isl scheduler. Unknown values fall back to a default with a warning.
  unsigned IslSerializeSCCs;

  if (FusionStrategy == "max") {
    IslSerializeSCCs = 0;
  } else if (FusionStrategy == "min") {
    IslSerializeSCCs = 1;
  } else {
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
              "fusion.\n";
    IslSerializeSCCs = 0;
  }

  int IslMaximizeBands;

  if (MaximizeBandDepth == "yes") {
    IslMaximizeBands = 1;
  } else if (MaximizeBandDepth == "no") {
    IslMaximizeBands = 0;
  } else {
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
              " or 'no'. Falling back to default: 'yes'\n";
    IslMaximizeBands = 1;
  }

  int IslOuterCoincidence;

  if (OuterCoincidence == "yes") {
    IslOuterCoincidence = 1;
  } else if (OuterCoincidence == "no") {
    IslOuterCoincidence = 0;
  } else {
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
              "'yes' or 'no'. Falling back to default: 'no'\n";
    IslOuterCoincidence = 0;
  }

  // Apply the scheduling options to the isl context the SCoP lives in.
  isl_ctx *Ctx = S.getIslCtx();

  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
  isl_options_set_tile_scale_tile_loops(Ctx, 0);

  // Let isl continue on errors while scheduling: a failed attempt then yields
  // a null schedule instead of aborting. The previous behavior is restored
  // right after compute_schedule().
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);

  // Build the scheduling constraints: validity for correctness, proximity for
  // locality, and validity again as coincidence constraints.
  auto SC = isl::schedule_constraints::on_domain(Domain);
  SC = SC.set_proximity(Proximity);
  SC = SC.set_validity(Validity);
  SC = SC.set_coincidence(Validity);
  isl_schedule *Schedule;
  Schedule = SC.compute_schedule().release();
  isl_options_set_on_error(Ctx, OnErrorStatus);

  // In cases the scheduler is not able to optimize the code, we just do not
  // touch the schedule.
  if (!Schedule)
    return false;

  DEBUG({
    auto *P = isl_printer_to_str(Ctx);
    P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
    P = isl_printer_print_schedule(P, Schedule);
    auto *str = isl_printer_get_str(P);
    dbgs() << "NewScheduleTree: \n" << str << "\n";
    free(str);
    isl_printer_free(P);
  });

  // Run the post-scheduling transformations (tiling, pattern-based
  // optimizations, ...) on the computed schedule tree.
  Function &F = S.getFunction();
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
  isl_schedule *NewSchedule =
      ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);

  // Discard the new schedule unless it differs from the original one.
  if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule)) {
    isl_schedule_free(NewSchedule);
    return false;
  }

  S.setScheduleTree(NewSchedule);
  S.markAsOptimized();

  if (OptimizedScops)
    S.dump();

  // Only the SCoP description was updated; no LLVM-IR was modified here.
  return false;
}
1605 
1606 void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const {
1607   isl_printer *p;
1608   char *ScheduleStr;
1609 
1610   OS << "Calculated schedule:\n";
1611 
1612   if (!LastSchedule) {
1613     OS << "n/a\n";
1614     return;
1615   }
1616 
1617   p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
1618   p = isl_printer_print_schedule(p, LastSchedule);
1619   ScheduleStr = isl_printer_get_str(p);
1620   isl_printer_free(p);
1621 
1622   OS << ScheduleStr << "\n";
1623 }
1624 
void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
  // Besides the usages inherited from ScopPass, require the dependence
  // analysis (the scheduling input) and the target transform info (used to
  // derive the matrix-multiplication kernel parameters).
  ScopPass::getAnalysisUsage(AU);
  AU.addRequired<DependenceInfo>();
  AU.addRequired<TargetTransformInfoWrapperPass>();
}
1630 
/// Create a new instance of the IslScheduleOptimizer pass.
Pass *polly::createIslScheduleOptimizerPass() {
  return new IslScheduleOptimizer();
}
1634 
// Register the pass and its analysis dependencies with the pass registry
// under the command-line name "polly-opt-isl".
INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
                      "Polly - Optimize schedule of SCoP", false, false);
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
                    "Polly - Optimize schedule of SCoP", false, false)
1642