1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass generates an entirely new schedule tree from the data dependences
11 // and iteration domains. The new schedule tree is computed in two steps:
12 //
13 // 1) The isl scheduling optimizer is run
14 //
15 // The isl scheduling optimizer creates a new schedule tree that maximizes
16 // parallelism and tileability and minimizes data-dependence distances. The
17 // algorithm used is a modified version of the ``Pluto'' algorithm:
18 //
19 //   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
20 //   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
21 //   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
22 //   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
23 //
24 // 2) A set of post-scheduling transformations is applied on the schedule tree.
25 //
26 // These optimizations include:
27 //
28 //  - Tiling of the innermost tilable bands
//  - Prevectorization - The choice of a possible outer loop that is strip-mined
30 //                       to the innermost level to enable inner-loop
31 //                       vectorization.
32 //  - Some optimizations for spatial locality are also planned.
33 //
34 // For a detailed description of the schedule tree itself please see section 6
35 // of:
36 //
37 // Polyhedral AST generation is more than scanning polyhedra
38 // Tobias Grosser, Sven Verdoolaege, Albert Cohen
// ACM Transactions on Programming Languages and Systems (TOPLAS),
40 // 37(4), July 2015
41 // http://www.grosser.es/#pub-polyhedral-AST-generation
42 //
43 // This publication also contains a detailed discussion of the different options
44 // for polyhedral loop unrolling, full/partial tile separation and other uses
45 // of the schedule tree.
46 //
47 //===----------------------------------------------------------------------===//
48 
49 #include "polly/ScheduleOptimizer.h"
50 #include "polly/CodeGen/CodeGeneration.h"
51 #include "polly/DependenceInfo.h"
52 #include "polly/LinkAllPasses.h"
53 #include "polly/Options.h"
54 #include "polly/ScopInfo.h"
55 #include "polly/Support/GICHelper.h"
56 #include "llvm/Analysis/TargetTransformInfo.h"
57 #include "llvm/Support/Debug.h"
58 #include "isl/aff.h"
59 #include "isl/band.h"
60 #include "isl/constraint.h"
61 #include "isl/map.h"
62 #include "isl/options.h"
63 #include "isl/printer.h"
64 #include "isl/schedule.h"
65 #include "isl/schedule_node.h"
66 #include "isl/space.h"
67 #include "isl/union_map.h"
68 #include "isl/union_set.h"
69 
70 using namespace llvm;
71 using namespace polly;
72 
73 #define DEBUG_TYPE "polly-opt-isl"
74 
// Command-line options that configure the isl scheduling optimizer and the
// target-specific parameters used by the post-scheduling transformations.
static cl::opt<std::string>
    OptimizeDeps("polly-opt-optimize-only",
                 cl::desc("Only a certain kind of dependences (all/raw)"),
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

static cl::opt<std::string>
    SimplifyDeps("polly-opt-simplify-deps",
                 cl::desc("Dependences should be simplified (yes/no)"),
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

static cl::opt<int> MaxConstantTerm(
    "polly-opt-max-constant-term",
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> MaxCoefficient(
    "polly-opt-max-coefficient",
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string> FusionStrategy(
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    MaximizeBandDepth("polly-opt-maximize-bands",
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string> OuterCoincidence(
    "polly-opt-outer-coincidence",
    cl::desc("Try to construct schedules where the outer member of each band "
             "satisfies the coincidence constraints (yes/no)"),
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> PrevectorWidth(
    "polly-prevect-width",
    cl::desc(
        "The number of loop iterations to strip-mine for pre-vectorization"),
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> FirstLevelTiling("polly-tiling",
                                      cl::desc("Enable loop tiling"),
                                      cl::init(true), cl::ZeroOrMore,
                                      cl::cat(PollyCategory));

static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("A throughput of the processor floating-point arithmetic units "
             "expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
136 
// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size
// represent the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the values of the parameters of Intel Core i7-3820
// SandyBridge in case the parameters are not specified. Such an approach helps
// also to attain the high-performance on IBM POWER System S822 and IBM Power
// 730 Express server.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
    cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));
165 static cl::opt<int> VectorRegisterBitwidth(
166     "polly-target-vector-register-bitwidth",
167     cl::desc("The size in bits of a vector register (if not set, this "
168              "information is taken from LLVM's target information."),
169     cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));
170 
// Options that control the different levels of loop tiling (first level,
// second level, and register tiling) applied to tileable bands.
static cl::opt<int> FirstLevelDefaultTileSize(
    "polly-default-tile-size",
    cl::desc("The default tile size (if not enough were provided by"
             " --polly-tile-sizes)"),
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    FirstLevelTileSizes("polly-tile-sizes",
                        cl::desc("A tile size for each loop dimension, filled "
                                 "with --polly-default-tile-size"),
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                        cl::cat(PollyCategory));

static cl::opt<bool>
    SecondLevelTiling("polly-2nd-level-tiling",
                      cl::desc("Enable a 2nd level loop of loop tiling"),
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondLevelDefaultTileSize(
    "polly-2nd-level-default-tile-size",
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
             " --polly-2nd-level-tile-sizes)"),
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    SecondLevelTileSizes("polly-2nd-level-tile-sizes",
                         cl::desc("A tile size for each loop dimension, filled "
                                  "with --polly-default-tile-size"),
                         cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                         cl::cat(PollyCategory));

static cl::opt<bool> RegisterTiling("polly-register-tiling",
                                    cl::desc("Enable register tiling"),
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::cat(PollyCategory));

static cl::opt<int> RegisterDefaultTileSize(
    "polly-register-tiling-default-tile-size",
    cl::desc("The default register tile size (if not enough were provided by"
             " --polly-register-tile-sizes)"),
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));
212 
213 static cl::opt<int> PollyPatternMatchingNcQuotient(
214     "polly-pattern-matching-nc-quotient",
215     cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
216              "macro-kernel, by Nr, the parameter of the micro-kernel"),
217     cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));
218 
static cl::list<int>
    RegisterTileSizes("polly-register-tile-sizes",
                      cl::desc("A tile size for each loop dimension, filled "
                               "with --polly-register-tile-size"),
                      cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                      cl::cat(PollyCategory));

// Enables optimizations based on pattern matching (e.g. the matrix
// multiplication detection implemented further down in this file).
static cl::opt<bool>
    PMBasedOpts("polly-pattern-matching-based-opts",
                cl::desc("Perform optimizations based on pattern matching"),
                cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> OptimizedScops(
    "polly-optimized-scops",
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
             "the isl scheduling optimizer and the set of post-scheduling "
             "transformations is applied on the schedule tree"),
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
237 
238 /// Create an isl_union_set, which describes the isolate option based on
239 /// IsoalteDomain.
240 ///
241 /// @param IsolateDomain An isl_set whose last dimension is the only one that
242 ///                      should belong to the current band node.
243 static __isl_give isl_union_set *
244 getIsolateOptions(__isl_take isl_set *IsolateDomain) {
245   auto Dims = isl_set_dim(IsolateDomain, isl_dim_set);
246   auto *IsolateRelation = isl_map_from_domain(IsolateDomain);
247   IsolateRelation = isl_map_move_dims(IsolateRelation, isl_dim_out, 0,
248                                       isl_dim_in, Dims - 1, 1);
249   auto *IsolateOption = isl_map_wrap(IsolateRelation);
250   auto *Id = isl_id_alloc(isl_set_get_ctx(IsolateOption), "isolate", nullptr);
251   return isl_union_set_from_set(isl_set_set_tuple_id(IsolateOption, Id));
252 }
253 
254 /// Create an isl_union_set, which describes the atomic option for the dimension
255 /// of the current node.
256 ///
257 /// It may help to reduce the size of generated code.
258 ///
259 /// @param Ctx An isl_ctx, which is used to create the isl_union_set.
260 static __isl_give isl_union_set *getAtomicOptions(__isl_take isl_ctx *Ctx) {
261   auto *Space = isl_space_set_alloc(Ctx, 0, 1);
262   auto *AtomicOption = isl_set_universe(Space);
263   auto *Id = isl_id_alloc(Ctx, "atomic", nullptr);
264   return isl_union_set_from_set(isl_set_set_tuple_id(AtomicOption, Id));
265 }
266 
267 /// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
268 ///
269 /// @param Set         A set, which should be modified.
270 /// @param VectorWidth A parameter, which determines the constraint.
271 static __isl_give isl_set *addExtentConstraints(__isl_take isl_set *Set,
272                                                 int VectorWidth) {
273   auto Dims = isl_set_dim(Set, isl_dim_set);
274   auto Space = isl_set_get_space(Set);
275   auto *LocalSpace = isl_local_space_from_space(Space);
276   auto *ExtConstr =
277       isl_constraint_alloc_inequality(isl_local_space_copy(LocalSpace));
278   ExtConstr = isl_constraint_set_constant_si(ExtConstr, 0);
279   ExtConstr =
280       isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, 1);
281   Set = isl_set_add_constraint(Set, ExtConstr);
282   ExtConstr = isl_constraint_alloc_inequality(LocalSpace);
283   ExtConstr = isl_constraint_set_constant_si(ExtConstr, VectorWidth - 1);
284   ExtConstr =
285       isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, -1);
286   return isl_set_add_constraint(Set, ExtConstr);
287 }
288 
289 /// Build the desired set of partial tile prefixes.
290 ///
291 /// We build a set of partial tile prefixes, which are prefixes of the vector
292 /// loop that have exactly VectorWidth iterations.
293 ///
294 /// 1. Get all prefixes of the vector loop.
295 /// 2. Extend it to a set, which has exactly VectorWidth iterations for
296 ///    any prefix from the set that was built on the previous step.
297 /// 3. Subtract loop domain from it, project out the vector loop dimension and
298 ///    get a set of prefixes, which don't have exactly VectorWidth iterations.
299 /// 4. Subtract it from all prefixes of the vector loop and get the desired
300 ///    set.
301 ///
302 /// @param ScheduleRange A range of a map, which describes a prefix schedule
303 ///                      relation.
304 static __isl_give isl_set *
305 getPartialTilePrefixes(__isl_take isl_set *ScheduleRange, int VectorWidth) {
306   auto Dims = isl_set_dim(ScheduleRange, isl_dim_set);
307   auto *LoopPrefixes = isl_set_project_out(isl_set_copy(ScheduleRange),
308                                            isl_dim_set, Dims - 1, 1);
309   auto *ExtentPrefixes =
310       isl_set_add_dims(isl_set_copy(LoopPrefixes), isl_dim_set, 1);
311   ExtentPrefixes = addExtentConstraints(ExtentPrefixes, VectorWidth);
312   auto *BadPrefixes = isl_set_subtract(ExtentPrefixes, ScheduleRange);
313   BadPrefixes = isl_set_project_out(BadPrefixes, isl_dim_set, Dims - 1, 1);
314   return isl_set_subtract(LoopPrefixes, BadPrefixes);
315 }
316 
317 __isl_give isl_schedule_node *ScheduleTreeOptimizer::isolateFullPartialTiles(
318     __isl_take isl_schedule_node *Node, int VectorWidth) {
319   assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);
320   Node = isl_schedule_node_child(Node, 0);
321   Node = isl_schedule_node_child(Node, 0);
322   auto *SchedRelUMap = isl_schedule_node_get_prefix_schedule_relation(Node);
323   auto *ScheduleRelation = isl_map_from_union_map(SchedRelUMap);
324   auto *ScheduleRange = isl_map_range(ScheduleRelation);
325   auto *IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
326   auto *AtomicOption = getAtomicOptions(isl_set_get_ctx(IsolateDomain));
327   auto *IsolateOption = getIsolateOptions(IsolateDomain);
328   Node = isl_schedule_node_parent(Node);
329   Node = isl_schedule_node_parent(Node);
330   auto *Options = isl_union_set_union(IsolateOption, AtomicOption);
331   Node = isl_schedule_node_band_set_ast_build_options(Node, Options);
332   return Node;
333 }
334 
/// Strip-mine the band member @p DimToVectorize of @p Node to prepare for
/// vectorization.
///
/// The chosen dimension is split into a band of its own, tiled by
/// @p VectorWidth, its full and partial tiles are isolated, and the point
/// band is sunk to the innermost position and marked "SIMD".
///
/// @param Node           The band node to transform.
/// @param DimToVectorize The index of the band member to strip-mine.
/// @param VectorWidth    The number of loop iterations to strip-mine.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::prevectSchedBand(__isl_take isl_schedule_node *Node,
                                        unsigned DimToVectorize,
                                        int VectorWidth) {
  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);

  auto Space = isl_schedule_node_band_get_space(Node);
  auto ScheduleDimensions = isl_space_dim(Space, isl_dim_set);
  isl_space_free(Space);
  assert(DimToVectorize < ScheduleDimensions);

  // Split the band so that the dimension to vectorize ends up alone in a
  // band of its own.
  if (DimToVectorize > 0) {
    Node = isl_schedule_node_band_split(Node, DimToVectorize);
    Node = isl_schedule_node_child(Node, 0);
  }
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl_schedule_node_band_split(Node, 1);
  // Tile that single-dimension band by VectorWidth.
  Space = isl_schedule_node_band_get_space(Node);
  auto Sizes = isl_multi_val_zero(Space);
  auto Ctx = isl_schedule_node_get_ctx(Node);
  Sizes =
      isl_multi_val_set_val(Sizes, 0, isl_val_int_from_si(Ctx, VectorWidth));
  Node = isl_schedule_node_band_tile(Node, Sizes);
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = isl_schedule_node_child(Node, 0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = isl_schedule_node_band_set_ast_build_options(
      Node, isl_union_set_read_from_str(Ctx, "{ unroll[x]: 1 = 0 }"));
  // Sink the point band to the innermost position and mark it so the backend
  // can recognize it as the SIMD loop.
  Node = isl_schedule_node_band_sink(Node);
  Node = isl_schedule_node_child(Node, 0);
  if (isl_schedule_node_get_type(Node) == isl_schedule_node_leaf)
    Node = isl_schedule_node_parent(Node);
  isl_id *LoopMarker = isl_id_alloc(Ctx, "SIMD", nullptr);
  Node = isl_schedule_node_insert_mark(Node, LoopMarker);
  return Node;
}
372 
373 __isl_give isl_schedule_node *
374 ScheduleTreeOptimizer::tileNode(__isl_take isl_schedule_node *Node,
375                                 const char *Identifier, ArrayRef<int> TileSizes,
376                                 int DefaultTileSize) {
377   auto Ctx = isl_schedule_node_get_ctx(Node);
378   auto Space = isl_schedule_node_band_get_space(Node);
379   auto Dims = isl_space_dim(Space, isl_dim_set);
380   auto Sizes = isl_multi_val_zero(Space);
381   std::string IdentifierString(Identifier);
382   for (unsigned i = 0; i < Dims; i++) {
383     auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
384     Sizes = isl_multi_val_set_val(Sizes, i, isl_val_int_from_si(Ctx, tileSize));
385   }
386   auto TileLoopMarkerStr = IdentifierString + " - Tiles";
387   isl_id *TileLoopMarker =
388       isl_id_alloc(Ctx, TileLoopMarkerStr.c_str(), nullptr);
389   Node = isl_schedule_node_insert_mark(Node, TileLoopMarker);
390   Node = isl_schedule_node_child(Node, 0);
391   Node = isl_schedule_node_band_tile(Node, Sizes);
392   Node = isl_schedule_node_child(Node, 0);
393   auto PointLoopMarkerStr = IdentifierString + " - Points";
394   isl_id *PointLoopMarker =
395       isl_id_alloc(Ctx, PointLoopMarkerStr.c_str(), nullptr);
396   Node = isl_schedule_node_insert_mark(Node, PointLoopMarker);
397   Node = isl_schedule_node_child(Node, 0);
398   return Node;
399 }
400 
401 __isl_give isl_schedule_node *
402 ScheduleTreeOptimizer::applyRegisterTiling(__isl_take isl_schedule_node *Node,
403                                            llvm::ArrayRef<int> TileSizes,
404                                            int DefaultTileSize) {
405   auto *Ctx = isl_schedule_node_get_ctx(Node);
406   Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
407   Node = isl_schedule_node_band_set_ast_build_options(
408       Node, isl_union_set_read_from_str(Ctx, "{unroll[x]}"));
409   return Node;
410 }
411 
412 bool ScheduleTreeOptimizer::isTileableBandNode(
413     __isl_keep isl_schedule_node *Node) {
414   if (isl_schedule_node_get_type(Node) != isl_schedule_node_band)
415     return false;
416 
417   if (isl_schedule_node_n_children(Node) != 1)
418     return false;
419 
420   if (!isl_schedule_node_band_get_permutable(Node))
421     return false;
422 
423   auto Space = isl_schedule_node_band_get_space(Node);
424   auto Dims = isl_space_dim(Space, isl_dim_set);
425   isl_space_free(Space);
426 
427   if (Dims <= 1)
428     return false;
429 
430   auto Child = isl_schedule_node_get_child(Node, 0);
431   auto Type = isl_schedule_node_get_type(Child);
432   isl_schedule_node_free(Child);
433 
434   if (Type != isl_schedule_node_leaf)
435     return false;
436 
437   return true;
438 }
439 
440 __isl_give isl_schedule_node *
441 ScheduleTreeOptimizer::standardBandOpts(__isl_take isl_schedule_node *Node,
442                                         void *User) {
443   if (FirstLevelTiling)
444     Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
445                     FirstLevelDefaultTileSize);
446 
447   if (SecondLevelTiling)
448     Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
449                     SecondLevelDefaultTileSize);
450 
451   if (RegisterTiling)
452     Node =
453         applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
454 
455   if (PollyVectorizerChoice == VECTORIZER_NONE)
456     return Node;
457 
458   auto Space = isl_schedule_node_band_get_space(Node);
459   auto Dims = isl_space_dim(Space, isl_dim_set);
460   isl_space_free(Space);
461 
462   for (int i = Dims - 1; i >= 0; i--)
463     if (isl_schedule_node_band_member_get_coincident(Node, i)) {
464       Node = prevectSchedBand(Node, i, PrevectorWidth);
465       break;
466     }
467 
468   return Node;
469 }
470 
/// Get the position of a dimension with a non-zero coefficient.
///
/// Check that isl constraint @p Constraint has only one non-zero
/// coefficient for dimensions that have type @p DimType. If this is true,
/// return the position of the dimension corresponding to the non-zero
/// coefficient and negative value, otherwise.
///
/// @param Constraint The isl constraint to be checked.
/// @param DimType    The type of the dimensions.
/// @return           The position of the dimension in case the isl
///                   constraint satisfies the requirements, a negative
///                   value, otherwise.
static int getMatMulConstraintDim(__isl_keep isl_constraint *Constraint,
                                  enum isl_dim_type DimType) {
  int DimPos = -1;
  auto *LocalSpace = isl_constraint_get_local_space(Constraint);
  int LocalSpaceDimNum = isl_local_space_dim(LocalSpace, DimType);
  for (int i = 0; i < LocalSpaceDimNum; i++) {
    auto *Val = isl_constraint_get_coefficient_val(Constraint, DimType, i);
    // Zero coefficients are ignored.
    if (isl_val_is_zero(Val)) {
      isl_val_free(Val);
      continue;
    }
    // Fail if a second non-zero coefficient is found, or if the coefficient
    // is not 1 for output dimensions, respectively -1 for input dimensions.
    if (DimPos >= 0 || (DimType == isl_dim_out && !isl_val_is_one(Val)) ||
        (DimType == isl_dim_in && !isl_val_is_negone(Val))) {
      isl_val_free(Val);
      isl_local_space_free(LocalSpace);
      return -1;
    }
    DimPos = i;
    isl_val_free(Val);
  }
  isl_local_space_free(LocalSpace);
  return DimPos;
}
506 
507 /// Check the form of the isl constraint.
508 ///
509 /// Check that the @p DimInPos input dimension of the isl constraint
510 /// @p Constraint has a coefficient that is equal to negative one, the @p
511 /// DimOutPos has a coefficient that is equal to one and others
512 /// have coefficients equal to zero.
513 ///
514 /// @param Constraint The isl constraint to be checked.
515 /// @param DimInPos   The input dimension of the isl constraint.
516 /// @param DimOutPos  The output dimension of the isl constraint.
517 /// @return           isl_stat_ok in case the isl constraint satisfies
518 ///                   the requirements, isl_stat_error otherwise.
519 static isl_stat isMatMulOperandConstraint(__isl_keep isl_constraint *Constraint,
520                                           int &DimInPos, int &DimOutPos) {
521   auto *Val = isl_constraint_get_constant_val(Constraint);
522   if (!isl_constraint_is_equality(Constraint) || !isl_val_is_zero(Val)) {
523     isl_val_free(Val);
524     return isl_stat_error;
525   }
526   isl_val_free(Val);
527   DimInPos = getMatMulConstraintDim(Constraint, isl_dim_in);
528   if (DimInPos < 0)
529     return isl_stat_error;
530   DimOutPos = getMatMulConstraintDim(Constraint, isl_dim_out);
531   if (DimOutPos < 0)
532     return isl_stat_error;
533   return isl_stat_ok;
534 }
535 
/// Check that the access relation corresponds to a non-constant operand
/// of the matrix multiplication.
///
/// Access relations that correspond to non-constant operands of the matrix
/// multiplication depend only on two input dimensions and have two output
/// dimensions. The function checks that the isl basic map @p bmap satisfies
/// the requirements. The two input dimensions can be specified via @p user
/// array.
///
/// @param bmap The isl basic map to be checked.
/// @param user The input dimensions of @p bmap.
/// @return     isl_stat_ok in case isl basic map satisfies the requirements,
///             isl_stat_error otherwise.
static isl_stat isMatMulOperandBasicMap(__isl_take isl_basic_map *bmap,
                                        void *user) {
  auto *Constraints = isl_basic_map_get_constraint_list(bmap);
  isl_basic_map_free(bmap);
  // Exactly one constraint per output dimension is expected.
  if (isl_constraint_list_n_constraint(Constraints) != 2) {
    isl_constraint_list_free(Constraints);
    return isl_stat_error;
  }
  // When the caller passes no positions, record them in a local scratch pair.
  int InPosPair[] = {-1, -1};
  auto DimInPos = user ? static_cast<int *>(user) : InPosPair;
  for (int i = 0; i < 2; i++) {
    auto *Constraint = isl_constraint_list_get_constraint(Constraints, i);
    int InPos, OutPos;
    // Reject constraints of the wrong form, output positions beyond the
    // second dimension, and input positions that contradict a previously
    // recorded one.
    if (isMatMulOperandConstraint(Constraint, InPos, OutPos) ==
            isl_stat_error ||
        OutPos > 1 || (DimInPos[OutPos] >= 0 && DimInPos[OutPos] != InPos)) {
      isl_constraint_free(Constraint);
      isl_constraint_list_free(Constraints);
      return isl_stat_error;
    }
    DimInPos[OutPos] = InPos;
    isl_constraint_free(Constraint);
  }
  isl_constraint_list_free(Constraints);
  return isl_stat_ok;
}
575 
/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
__isl_give isl_map *permuteDimensions(__isl_take isl_map *Map,
                                      enum isl_dim_type DimType,
                                      unsigned DstPos, unsigned SrcPos) {
  assert(DstPos < isl_map_dim(Map, DimType) &&
         SrcPos < isl_map_dim(Map, DimType));
  if (DstPos == SrcPos)
    return Map;
  // Save the tuple ids; they are restored after the dimension moves below.
  isl_id *DimId = nullptr;
  if (isl_map_has_tuple_id(Map, DimType))
    DimId = isl_map_get_tuple_id(Map, DimType);
  // The opposite tuple serves as scratch space for the swap.
  auto FreeDim = DimType == isl_dim_in ? isl_dim_out : isl_dim_in;
  isl_id *FreeDimId = nullptr;
  if (isl_map_has_tuple_id(Map, FreeDim))
    FreeDimId = isl_map_get_tuple_id(Map, FreeDim);
  // Move both dimensions into the scratch tuple (higher position first so
  // the lower index stays valid), then reinsert them at each other's
  // position.
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  Map = isl_map_move_dims(Map, FreeDim, 0, DimType, MaxDim, 1);
  Map = isl_map_move_dims(Map, FreeDim, 0, DimType, MinDim, 1);
  Map = isl_map_move_dims(Map, DimType, MinDim, FreeDim, 1, 1);
  Map = isl_map_move_dims(Map, DimType, MaxDim, FreeDim, 0, 1);
  // Restore the saved tuple ids.
  if (DimId)
    Map = isl_map_set_tuple_id(Map, DimType, DimId);
  if (FreeDimId)
    Map = isl_map_set_tuple_id(Map, FreeDim, FreeDimId);
  return Map;
}
612 
613 /// Check the form of the access relation.
614 ///
615 /// Check that the access relation @p AccMap has the form M[i][j], where i
616 /// is a @p FirstPos and j is a @p SecondPos.
617 ///
618 /// @param AccMap    The access relation to be checked.
619 /// @param FirstPos  The index of the input dimension that is mapped to
620 ///                  the first output dimension.
621 /// @param SecondPos The index of the input dimension that is mapped to the
622 ///                  second output dimension.
623 /// @return          True in case @p AccMap has the expected form and false,
624 ///                  otherwise.
625 static bool isMatMulOperandAcc(__isl_keep isl_map *AccMap, int &FirstPos,
626                                int &SecondPos) {
627   int DimInPos[] = {FirstPos, SecondPos};
628   if (isl_map_foreach_basic_map(AccMap, isMatMulOperandBasicMap,
629                                 static_cast<void *>(DimInPos)) != isl_stat_ok ||
630       DimInPos[0] < 0 || DimInPos[1] < 0)
631     return false;
632   FirstPos = DimInPos[0];
633   SecondPos = DimInPos[1];
634   return true;
635 }
636 
637 /// Does the memory access represent a non-scalar operand of the matrix
638 /// multiplication.
639 ///
640 /// Check that the memory access @p MemAccess is the read access to a non-scalar
641 /// operand of the matrix multiplication or its result.
642 ///
643 /// @param MemAccess The memory access to be checked.
644 /// @param MMI       Parameters of the matrix multiplication operands.
645 /// @return          True in case the memory access represents the read access
646 ///                  to a non-scalar operand of the matrix multiplication and
647 ///                  false, otherwise.
648 static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
649                                         MatMulInfoTy &MMI) {
650   if (!MemAccess->isArrayKind() || !MemAccess->isRead())
651     return false;
652   isl_map *AccMap = MemAccess->getAccessRelation();
653   if (isMatMulOperandAcc(AccMap, MMI.i, MMI.j) && !MMI.ReadFromC &&
654       isl_map_n_basic_map(AccMap) == 1) {
655     MMI.ReadFromC = MemAccess;
656     isl_map_free(AccMap);
657     return true;
658   }
659   if (isMatMulOperandAcc(AccMap, MMI.i, MMI.k) && !MMI.A &&
660       isl_map_n_basic_map(AccMap) == 1) {
661     MMI.A = MemAccess;
662     isl_map_free(AccMap);
663     return true;
664   }
665   if (isMatMulOperandAcc(AccMap, MMI.k, MMI.j) && !MMI.B &&
666       isl_map_n_basic_map(AccMap) == 1) {
667     MMI.B = MemAccess;
668     isl_map_free(AccMap);
669     return true;
670   }
671   isl_map_free(AccMap);
672   return false;
673 }
674 
/// Check accesses to operands of the matrix multiplication.
///
/// Check that accesses of the SCoP statement, which corresponds to
/// the partial schedule @p PartialSchedule, are scalar in terms of loops
/// containing the matrix multiplication, in case they do not represent
/// accesses to the non-scalar operands of the matrix multiplication or
/// its result.
///
/// @param  PartialSchedule The partial schedule of the SCoP statement.
/// @param  MMI             Parameters of the matrix multiplication operands.
/// @return                 True in case the corresponding SCoP statement
///                         represents matrix multiplication and false,
///                         otherwise.
static bool containsOnlyMatrMultAcc(__isl_keep isl_map *PartialSchedule,
                                    MatMulInfoTy &MMI) {
  auto *InputDimId = isl_map_get_tuple_id(PartialSchedule, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimId));
  isl_id_free(InputDimId);
  unsigned OutDimNum = isl_map_dim(PartialSchedule, isl_dim_out);
  assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  // Move each of the i, j and k dimensions to the innermost position, so the
  // stride of the remaining accesses can be checked with respect to each of
  // them.
  auto *MapI = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.i, OutDimNum - 1);
  auto *MapJ = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.j, OutDimNum - 1);
  auto *MapK = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.k, OutDimNum - 1);
  // The loop skips the last access (note the end() - 1 bound); every other
  // array access must either be a matmul operand or have stride zero with
  // respect to all three loops.
  for (auto *MemA = Stmt->begin(); MemA != Stmt->end() - 1; MemA++) {
    auto *MemAccessPtr = *MemA;
    if (MemAccessPtr->isArrayKind() && MemAccessPtr != MMI.WriteToC &&
        !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
        !(MemAccessPtr->isStrideZero(isl_map_copy(MapI)) &&
          MemAccessPtr->isStrideZero(isl_map_copy(MapJ)) &&
          MemAccessPtr->isStrideZero(isl_map_copy(MapK)))) {
      isl_map_free(MapI);
      isl_map_free(MapJ);
      isl_map_free(MapK);
      return false;
    }
  }
  isl_map_free(MapI);
  isl_map_free(MapJ);
  isl_map_free(MapK);
  return true;
}
721 
722 /// Check for dependencies corresponding to the matrix multiplication.
723 ///
724 /// Check that there is only true dependence of the form
725 /// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
726 /// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
727 /// to the dependency produced by the matrix multiplication.
728 ///
729 /// @param  Schedule The schedule of the SCoP statement.
730 /// @param  D The SCoP dependencies.
731 /// @param  Pos The parameter to desribe an acceptable true dependence.
732 ///             In case it has a negative value, try to determine its
733 ///             acceptable value.
734 /// @return True in case dependencies correspond to the matrix multiplication
735 ///         and false, otherwise.
736 static bool containsOnlyMatMulDep(__isl_keep isl_map *Schedule,
737                                   const Dependences *D, int &Pos) {
738   auto *WAR = D->getDependences(Dependences::TYPE_WAR);
739   if (!isl_union_map_is_empty(WAR)) {
740     isl_union_map_free(WAR);
741     return false;
742   }
743   isl_union_map_free(WAR);
744   auto *RAW = D->getDependences(Dependences::TYPE_RAW);
745   auto *Domain = isl_map_domain(isl_map_copy(Schedule));
746   auto *Space = isl_space_map_from_domain_and_range(isl_set_get_space(Domain),
747                                                     isl_set_get_space(Domain));
748   isl_set_free(Domain);
749   auto *Deltas = isl_map_deltas(isl_union_map_extract_map(RAW, Space));
750   int DeltasDimNum = isl_set_dim(Deltas, isl_dim_set);
751   for (int i = 0; i < DeltasDimNum; i++) {
752     auto *Val = isl_set_plain_get_val_if_fixed(Deltas, isl_dim_set, i);
753     if (Pos < 0 && isl_val_is_one(Val))
754       Pos = i;
755     if (isl_val_is_nan(Val) ||
756         !(isl_val_is_zero(Val) || (i == Pos && isl_val_is_one(Val)))) {
757       isl_val_free(Val);
758       isl_union_map_free(RAW);
759       isl_set_free(Deltas);
760       return false;
761     }
762     isl_val_free(Val);
763   }
764   isl_union_map_free(RAW);
765   isl_set_free(Deltas);
766   return true;
767 }
768 
/// Check if the SCoP statement could probably be optimized with analytical
/// modeling.
///
/// containsMatrMult tries to determine whether the following conditions
/// are true:
/// 1. The last memory access modeling an array, MA1, represents writing to
///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
///    under consideration.
/// 2. There is only one loop-carried true dependency, and it has the
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
///    loop-carried or anti dependencies.
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
///    and all memory accesses of the SCoP that are different from MA1, MA2,
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
///    of loops i1, i2 and i3.
///
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
///        to check.
/// @param D   The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return    True if all three conditions hold and false, otherwise.
static bool containsMatrMult(__isl_keep isl_map *PartialSchedule,
                             const Dependences *D, MatMulInfoTy &MMI) {
  // Recover the ScopStmt stored as the user pointer of the input tuple id.
  auto *InputDimsId = isl_map_get_tuple_id(PartialSchedule, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimsId));
  isl_id_free(InputDimsId);
  // A matrix multiplication needs more than one memory access.
  if (Stmt->size() <= 1)
    return false;
  // Condition 1: walk the accesses backwards, skipping non-array accesses,
  // and require the last array access to be a single-basic-map write of the
  // form M(i1, i2).
  // NOTE(review): the loop stops before Stmt->begin(), so the statement's
  // very first access is never inspected -- confirm this is intended.
  for (auto *MemA = Stmt->end() - 1; MemA != Stmt->begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto *AccMap = MemAccessPtr->getAccessRelation();
    if (isl_map_n_basic_map(AccMap) != 1 ||
        !isMatMulOperandAcc(AccMap, MMI.i, MMI.j)) {
      isl_map_free(AccMap);
      return false;
    }
    isl_map_free(AccMap);
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // Condition 2: the only loop-carried true dependence is carried by the
  // reduction dimension, recorded in MMI.k.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // Condition 3: classify the reads of A, B and C (filled into MMI by
  // containsOnlyMatrMultAcc) and require all other accesses to be scalar
  // with respect to the matmul loops.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  // All three read operands must have been identified.
  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}
826 
827 /// Permute two dimensions of the band node.
828 ///
829 /// Permute FirstDim and SecondDim dimensions of the Node.
830 ///
831 /// @param Node The band node to be modified.
832 /// @param FirstDim The first dimension to be permuted.
833 /// @param SecondDim The second dimension to be permuted.
834 static __isl_give isl_schedule_node *
835 permuteBandNodeDimensions(__isl_take isl_schedule_node *Node, unsigned FirstDim,
836                           unsigned SecondDim) {
837   assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band &&
838          isl_schedule_node_band_n_member(Node) > std::max(FirstDim, SecondDim));
839   auto PartialSchedule = isl_schedule_node_band_get_partial_schedule(Node);
840   auto PartialScheduleFirstDim =
841       isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, FirstDim);
842   auto PartialScheduleSecondDim =
843       isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, SecondDim);
844   PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff(
845       PartialSchedule, SecondDim, PartialScheduleFirstDim);
846   PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff(
847       PartialSchedule, FirstDim, PartialScheduleSecondDim);
848   Node = isl_schedule_node_delete(Node);
849   Node = isl_schedule_node_insert_partial_schedule(Node, PartialSchedule);
850   return Node;
851 }
852 
853 __isl_give isl_schedule_node *ScheduleTreeOptimizer::createMicroKernel(
854     __isl_take isl_schedule_node *Node, MicroKernelParamsTy MicroKernelParams) {
855   applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr}, 1);
856   Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
857   Node = permuteBandNodeDimensions(Node, 0, 1);
858   return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
859 }
860 
/// Create the BLIS macro-kernel.
///
/// Tile the band node so that the three innermost dimensions are blocked
/// with the cache-blocking parameters Mc, Nc and Kc and interchange the
/// resulting tile dimensions. If all parameters are one, the node is
/// returned unchanged.
///
/// @param Node              The band node to be tiled.
/// @param MacroKernelParams The cache-blocking parameters (Mc, Nc, Kc).
/// @return The child band node representing the macro-kernel, or @p Node
///         if no tiling was performed.
__isl_give isl_schedule_node *ScheduleTreeOptimizer::createMacroKernel(
    __isl_take isl_schedule_node *Node, MacroKernelParamsTy MacroKernelParams) {
  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);
  // Tiling with size one in every dimension would be a no-op.
  if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
      MacroKernelParams.Kc == 1)
    return Node;
  int DimOutNum = isl_schedule_node_band_n_member(Node);
  // Only the three innermost dimensions are blocked; all outer dimensions
  // keep a tile size of one.
  std::vector<int> TileSizes(DimOutNum, 1);
  TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
  TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
  TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
  Node = tileNode(Node, "1st level tiling", TileSizes, 1);
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  // Interchange the three innermost tile dimensions.
  Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
  Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
  return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
}
878 
/// Get parameters of the BLIS micro kernel.
///
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
/// such that no stalls caused by the combination of latencies and dependencies
/// are introduced during the updates of the resulting matrix of the matrix
/// multiplication. However, they should also be as small as possible to
/// release more registers for entries of multiplied matrices.
///
/// @param TTI Target Transform Info.
/// @return The structure of type MicroKernelParamsTy.
/// @see MicroKernelParamsTy
static struct MicroKernelParamsTy
getMicroKernelParams(const llvm::TargetTransformInfo *TTI) {
  assert(TTI && "The target transform info should be provided.");

  // Nvec - Number of double-precision floating-point numbers that can be held
  // by a vector register. Use 2 by default.
  long RegisterBitwidth = VectorRegisterBitwidth;

  // A bitwidth of -1 means the command-line option was not set; query the
  // target instead.
  if (RegisterBitwidth == -1)
    RegisterBitwidth = TTI->getRegisterBitWidth(true);
  auto Nvec = RegisterBitwidth / 64;
  if (Nvec == 0)
    Nvec = 2;
  // Nr is rounded up to a multiple of the vector width Nvec, and Mr is then
  // chosen so that Mr * Nr covers at least
  // Nvec * LatencyVectorFma * ThroughputVectorFma independent updates.
  int Nr =
      ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
  int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
  return {Mr, Nr};
}
908 
/// Get parameters of the BLIS macro kernel.
///
/// During the computation of matrix multiplication, blocks of partitioned
/// matrices are mapped to different layers of the memory hierarchy.
/// To optimize data reuse, blocks should be ideally kept in cache between
/// iterations. Since parameters of the macro kernel determine sizes of these
/// blocks, there are upper and lower bounds on these parameters.
///
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @return The structure of type MacroKernelParamsTy.
/// @see MacroKernelParamsTy
/// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const MicroKernelParamsTy &MicroKernelParams) {
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car/Cac and the resulting Kc/Mc follow the analytical model from the
  // paper cited above. The factor 8 is the element size in bytes --
  // presumably sizeof(double); TODO confirm for other element types.
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * 8);
  double Cac = static_cast<double>(Kc * 8 * SecondCacheLevelAssociativity) /
               SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  // Nc is a configurable multiple of the micro-kernel's Nr.
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;
  return {Mc, Nc, Kc};
}
947 
948 /// Create an access relation that is specific to
949 ///        the matrix multiplication pattern.
950 ///
951 /// Create an access relation of the following form:
952 /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
953 /// where I is @p FirstDim, J is @p SecondDim.
954 ///
955 /// It can be used, for example, to create relations that helps to consequently
956 /// access elements of operands of a matrix multiplication after creation of
957 /// the BLIS micro and macro kernels.
958 ///
959 /// @see ScheduleTreeOptimizer::createMicroKernel
960 /// @see ScheduleTreeOptimizer::createMacroKernel
961 ///
962 /// Subsequently, the described access relation is applied to the range of
963 /// @p MapOldIndVar, that is used to map original induction variables to
964 /// the ones, which are produced by schedule transformations. It helps to
965 /// define relations using a new space and, at the same time, keep them
966 /// in the original one.
967 ///
968 /// @param MapOldIndVar The relation, which maps original induction variables
969 ///                     to the ones, which are produced by schedule
970 ///                     transformations.
971 /// @param FirstDim, SecondDim The input dimensions that are used to define
972 ///        the specified access relation.
973 /// @return The specified access relation.
974 __isl_give isl_map *getMatMulAccRel(__isl_take isl_map *MapOldIndVar,
975                                     unsigned FirstDim, unsigned SecondDim) {
976   auto *Ctx = isl_map_get_ctx(MapOldIndVar);
977   auto *AccessRelSpace = isl_space_alloc(Ctx, 0, 9, 3);
978   auto *AccessRel = isl_map_universe(AccessRelSpace);
979   AccessRel = isl_map_equate(AccessRel, isl_dim_in, FirstDim, isl_dim_out, 0);
980   AccessRel = isl_map_equate(AccessRel, isl_dim_in, 5, isl_dim_out, 1);
981   AccessRel = isl_map_equate(AccessRel, isl_dim_in, SecondDim, isl_dim_out, 2);
982   return isl_map_apply_range(MapOldIndVar, AccessRel);
983 }
984 
985 __isl_give isl_schedule_node *
986 createExtensionNode(__isl_take isl_schedule_node *Node,
987                     __isl_take isl_map *ExtensionMap) {
988   auto *Extension = isl_union_map_from_map(ExtensionMap);
989   auto *NewNode = isl_schedule_node_from_extension(Extension);
990   return isl_schedule_node_graft_before(Node, NewNode);
991 }
992 
/// Apply the packing transformation.
///
/// The packing transformation can be described as a data-layout
/// transformation that requires to introduce a new array, copy data
/// to the array, and change memory access locations to reference the array.
/// It can be used to ensure that elements of the new array are read in-stride
/// access, aligned to cache lines boundaries, and preloaded into certain cache
/// levels.
///
/// As an example let us consider the packing of the array A that would help
/// to read its elements with in-stride access. An access to the array A
/// is represented by an access relation that has the form
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
/// k mod Kc, j mod Nr, i mod Mr].
///
/// To ensure that elements of the array A are read in-stride access, we add
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
/// Scop::createScopArrayInfo, change the access relation
/// S[i, j, k] -> A[i, k] to
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
/// the copy statement created by Scop::addScopStmt.
///
/// @param Node The schedule node to be optimized.
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
///                                 to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static __isl_give isl_schedule_node *optimizeDataLayoutMatrMulPattern(
    __isl_take isl_schedule_node *Node, __isl_take isl_map *MapOldIndVar,
    MicroKernelParamsTy MicroParams, MacroKernelParamsTy MacroParams,
    MatMulInfoTy &MMI) {
  // Recover the ScopStmt stored as the user pointer of the input tuple id.
  auto InputDimsId = isl_map_get_tuple_id(MapOldIndVar, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimsId));
  isl_id_free(InputDimsId);

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // Walk five levels up from the micro-kernel band and split the band there
  // to obtain the insertion point for the copy-in code.
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  Node = isl_schedule_node_parent(Node);
  Node = isl_schedule_node_child(isl_schedule_node_band_split(Node, 2), 0);
  // Input dimensions 3 and 7 of the nine-dimensional kernel schedule are
  // used to address the packed copy of B.
  auto *AccRel = getMatMulAccRel(isl_map_copy(MapOldIndVar), 3, 7);
  // Packed_B has the shape [Nc/Nr][Kc][Nr].
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = isl_map_set_tuple_id(AccRel, isl_dim_out, SAI->getBasePtrId());
  // Redirect the access to B to the packed array, keeping the old relation
  // as the source of the copy statement.
  auto *OldAcc = MMI.B->getAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  // Build the extension map for the copy statement: keep the two outermost
  // schedule dimensions, invert the relation, and execute the copy only for
  // iterations with the i dimension fixed to zero.
  auto *ExtMap =
      isl_map_project_out(isl_map_copy(MapOldIndVar), isl_dim_out, 2,
                          isl_map_dim(MapOldIndVar, isl_dim_out) - 2);
  ExtMap = isl_map_reverse(ExtMap);
  ExtMap = isl_map_fix_si(ExtMap, isl_dim_out, MMI.i, 0);
  auto *Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto *DomainId = isl_set_get_tuple_id(Domain);
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getAccessRelation(), isl_set_copy(Domain));
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, isl_id_copy(DomainId));
  ExtMap = isl_map_intersect_range(ExtMap, isl_set_copy(Domain));
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = isl_schedule_node_child(Node, 0);
  // Input dimensions 4 and 6 address the packed copy of A, which has the
  // shape [Mc/Mr][Kc][Mr].
  AccRel = getMatMulAccRel(isl_map_copy(MapOldIndVar), 4, 6);
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = isl_map_set_tuple_id(AccRel, isl_dim_out, SAI->getBasePtrId());
  OldAcc = MMI.A->getAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  // Keep the three outermost schedule dimensions this time, and execute the
  // copy of A only for iterations with the j dimension fixed to zero.
  ExtMap = isl_map_project_out(MapOldIndVar, isl_dim_out, 3,
                               isl_map_dim(MapOldIndVar, isl_dim_out) - 3);
  ExtMap = isl_map_reverse(ExtMap);
  ExtMap = isl_map_fix_si(ExtMap, isl_dim_out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(OldAcc, MMI.A->getAccessRelation(),
                                           isl_set_copy(Domain));

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, DomainId);
  ExtMap = isl_map_intersect_range(ExtMap, Domain);
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  // Descend back to the position corresponding to the micro-kernel band.
  Node = isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
  return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
}
1094 
/// Get a relation mapping induction variables produced by schedule
/// transformations to the original ones.
///
/// @param Node The schedule node produced as the result of creation
///        of the BLIS kernels.
/// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
///                                             to be taken into account.
///                                             Currently unused in the body.
/// @return  The relation mapping original induction variables to the ones
///          produced by schedule transformation.
/// @see ScheduleTreeOptimizer::createMicroKernel
/// @see ScheduleTreeOptimizer::createMacroKernel
/// @see getMacroKernelParams
__isl_give isl_map *
getInductionVariablesSubstitution(__isl_take isl_schedule_node *Node,
                                  MicroKernelParamsTy MicroKernelParams,
                                  MacroKernelParamsTy MacroKernelParams) {
  // NOTE(review): despite the __isl_take annotation, Node itself is never
  // freed here (only the child reference is) and the caller keeps using it
  // afterwards; the annotation looks like it should be __isl_keep --
  // confirm before changing.
  auto *Child = isl_schedule_node_get_child(Node, 0);
  auto *UnMapOldIndVar = isl_schedule_node_get_prefix_schedule_union_map(Child);
  isl_schedule_node_free(Child);
  auto *MapOldIndVar = isl_map_from_union_map(UnMapOldIndVar);
  // Keep only the nine innermost schedule dimensions -- the ones consumed
  // by getMatMulAccRel's nine-dimensional input space.
  if (isl_map_dim(MapOldIndVar, isl_dim_out) > 9)
    MapOldIndVar =
        isl_map_project_out(MapOldIndVar, isl_dim_out, 0,
                            isl_map_dim(MapOldIndVar, isl_dim_out) - 9);
  return MapOldIndVar;
}
1121 
1122 __isl_give isl_schedule_node *ScheduleTreeOptimizer::optimizeMatMulPattern(
1123     __isl_take isl_schedule_node *Node, const llvm::TargetTransformInfo *TTI,
1124     MatMulInfoTy &MMI) {
1125   assert(TTI && "The target transform info should be provided.");
1126   int DimOutNum = isl_schedule_node_band_n_member(Node);
1127   assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
1128                           "and, consequently, the corresponding scheduling "
1129                           "functions have at least three dimensions.");
1130   Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
1131   int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
1132   int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
1133   Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
1134   NewK = MMI.k == DimOutNum - 2 ? MMI.j : MMI.k;
1135   Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
1136   auto MicroKernelParams = getMicroKernelParams(TTI);
1137   auto MacroKernelParams = getMacroKernelParams(MicroKernelParams);
1138   Node = createMacroKernel(Node, MacroKernelParams);
1139   Node = createMicroKernel(Node, MicroKernelParams);
1140   if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
1141       MacroKernelParams.Kc == 1)
1142     return Node;
1143   auto *MapOldIndVar = getInductionVariablesSubstitution(
1144       Node, MicroKernelParams, MacroKernelParams);
1145   if (!MapOldIndVar)
1146     return Node;
1147   return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
1148                                           MacroKernelParams, MMI);
1149 }
1150 
1151 bool ScheduleTreeOptimizer::isMatrMultPattern(
1152     __isl_keep isl_schedule_node *Node, const Dependences *D,
1153     MatMulInfoTy &MMI) {
1154   auto *PartialSchedule =
1155       isl_schedule_node_band_get_partial_schedule_union_map(Node);
1156   if (isl_schedule_node_band_n_member(Node) < 3 ||
1157       isl_union_map_n_map(PartialSchedule) != 1) {
1158     isl_union_map_free(PartialSchedule);
1159     return false;
1160   }
1161   auto *NewPartialSchedule = isl_map_from_union_map(PartialSchedule);
1162   if (containsMatrMult(NewPartialSchedule, D, MMI)) {
1163     isl_map_free(NewPartialSchedule);
1164     return true;
1165   }
1166   isl_map_free(NewPartialSchedule);
1167   return false;
1168 }
1169 
1170 __isl_give isl_schedule_node *
1171 ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
1172                                     void *User) {
1173   if (!isTileableBandNode(Node))
1174     return Node;
1175 
1176   const OptimizerAdditionalInfoTy *OAI =
1177       static_cast<const OptimizerAdditionalInfoTy *>(User);
1178 
1179   MatMulInfoTy MMI;
1180   if (PMBasedOpts && User && isMatrMultPattern(Node, OAI->D, MMI)) {
1181     DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
1182     Node = optimizeMatMulPattern(Node, OAI->TTI, MMI);
1183   }
1184 
1185   return standardBandOpts(Node, User);
1186 }
1187 
1188 __isl_give isl_schedule *
1189 ScheduleTreeOptimizer::optimizeSchedule(__isl_take isl_schedule *Schedule,
1190                                         const OptimizerAdditionalInfoTy *OAI) {
1191   isl_schedule_node *Root = isl_schedule_get_root(Schedule);
1192   Root = optimizeScheduleNode(Root, OAI);
1193   isl_schedule_free(Schedule);
1194   auto S = isl_schedule_node_get_schedule(Root);
1195   isl_schedule_node_free(Root);
1196   return S;
1197 }
1198 
1199 __isl_give isl_schedule_node *ScheduleTreeOptimizer::optimizeScheduleNode(
1200     __isl_take isl_schedule_node *Node, const OptimizerAdditionalInfoTy *OAI) {
1201   Node = isl_schedule_node_map_descendant_bottom_up(
1202       Node, optimizeBand, const_cast<void *>(static_cast<const void *>(OAI)));
1203   return Node;
1204 }
1205 
1206 bool ScheduleTreeOptimizer::isProfitableSchedule(
1207     Scop &S, __isl_keep isl_schedule *NewSchedule) {
1208   // To understand if the schedule has been optimized we check if the schedule
1209   // has changed at all.
1210   // TODO: We can improve this by tracking if any necessarily beneficial
1211   // transformations have been performed. This can e.g. be tiling, loop
1212   // interchange, or ...) We can track this either at the place where the
1213   // transformation has been performed or, in case of automatic ILP based
1214   // optimizations, by comparing (yet to be defined) performance metrics
1215   // before/after the scheduling optimizer
1216   // (e.g., #stride-one accesses)
1217   if (S.containsExtensionNode(NewSchedule))
1218     return true;
1219   auto *NewScheduleMap = isl_schedule_get_map(NewSchedule);
1220   isl_union_map *OldSchedule = S.getSchedule();
1221   assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
1222                         "that make Scop::getSchedule() return nullptr.");
1223   bool changed = !isl_union_map_is_equal(OldSchedule, NewScheduleMap);
1224   isl_union_map_free(OldSchedule);
1225   isl_union_map_free(NewScheduleMap);
1226   return changed;
1227 }
1228 
1229 namespace {
1230 class IslScheduleOptimizer : public ScopPass {
1231 public:
1232   static char ID;
1233   explicit IslScheduleOptimizer() : ScopPass(ID) { LastSchedule = nullptr; }
1234 
1235   ~IslScheduleOptimizer() { isl_schedule_free(LastSchedule); }
1236 
1237   /// Optimize the schedule of the SCoP @p S.
1238   bool runOnScop(Scop &S) override;
1239 
1240   /// Print the new schedule for the SCoP @p S.
1241   void printScop(raw_ostream &OS, Scop &S) const override;
1242 
1243   /// Register all analyses and transformation required.
1244   void getAnalysisUsage(AnalysisUsage &AU) const override;
1245 
1246   /// Release the internal memory.
1247   void releaseMemory() override {
1248     isl_schedule_free(LastSchedule);
1249     LastSchedule = nullptr;
1250   }
1251 
1252 private:
1253   isl_schedule *LastSchedule;
1254 };
1255 } // namespace
1256 
// Pass identification token used by LLVM's pass infrastructure; its address,
// not its value, identifies the pass.
char IslScheduleOptimizer::ID = 0;
1258 
1259 bool IslScheduleOptimizer::runOnScop(Scop &S) {
1260 
1261   // Skip empty SCoPs but still allow code generation as it will delete the
1262   // loops present but not needed.
1263   if (S.getSize() == 0) {
1264     S.markAsOptimized();
1265     return false;
1266   }
1267 
1268   const Dependences &D =
1269       getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);
1270 
1271   if (!D.hasValidDependences())
1272     return false;
1273 
1274   isl_schedule_free(LastSchedule);
1275   LastSchedule = nullptr;
1276 
1277   // Build input data.
1278   int ValidityKinds =
1279       Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1280   int ProximityKinds;
1281 
1282   if (OptimizeDeps == "all")
1283     ProximityKinds =
1284         Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1285   else if (OptimizeDeps == "raw")
1286     ProximityKinds = Dependences::TYPE_RAW;
1287   else {
1288     errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
1289            << " Falling back to optimizing all dependences.\n";
1290     ProximityKinds =
1291         Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
1292   }
1293 
1294   isl_union_set *Domain = S.getDomains();
1295 
1296   if (!Domain)
1297     return false;
1298 
1299   isl_union_map *Validity = D.getDependences(ValidityKinds);
1300   isl_union_map *Proximity = D.getDependences(ProximityKinds);
1301 
1302   // Simplify the dependences by removing the constraints introduced by the
1303   // domains. This can speed up the scheduling time significantly, as large
1304   // constant coefficients will be removed from the dependences. The
1305   // introduction of some additional dependences reduces the possible
1306   // transformations, but in most cases, such transformation do not seem to be
1307   // interesting anyway. In some cases this option may stop the scheduler to
1308   // find any schedule.
1309   if (SimplifyDeps == "yes") {
1310     Validity = isl_union_map_gist_domain(Validity, isl_union_set_copy(Domain));
1311     Validity = isl_union_map_gist_range(Validity, isl_union_set_copy(Domain));
1312     Proximity =
1313         isl_union_map_gist_domain(Proximity, isl_union_set_copy(Domain));
1314     Proximity = isl_union_map_gist_range(Proximity, isl_union_set_copy(Domain));
1315   } else if (SimplifyDeps != "no") {
1316     errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
1317               "or 'no'. Falling back to default: 'yes'\n";
1318   }
1319 
1320   DEBUG(dbgs() << "\n\nCompute schedule from: ");
1321   DEBUG(dbgs() << "Domain := " << stringFromIslObj(Domain) << ";\n");
1322   DEBUG(dbgs() << "Proximity := " << stringFromIslObj(Proximity) << ";\n");
1323   DEBUG(dbgs() << "Validity := " << stringFromIslObj(Validity) << ";\n");
1324 
1325   unsigned IslSerializeSCCs;
1326 
1327   if (FusionStrategy == "max") {
1328     IslSerializeSCCs = 0;
1329   } else if (FusionStrategy == "min") {
1330     IslSerializeSCCs = 1;
1331   } else {
1332     errs() << "warning: Unknown fusion strategy. Falling back to maximal "
1333               "fusion.\n";
1334     IslSerializeSCCs = 0;
1335   }
1336 
1337   int IslMaximizeBands;
1338 
1339   if (MaximizeBandDepth == "yes") {
1340     IslMaximizeBands = 1;
1341   } else if (MaximizeBandDepth == "no") {
1342     IslMaximizeBands = 0;
1343   } else {
1344     errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
1345               " or 'no'. Falling back to default: 'yes'\n";
1346     IslMaximizeBands = 1;
1347   }
1348 
1349   int IslOuterCoincidence;
1350 
1351   if (OuterCoincidence == "yes") {
1352     IslOuterCoincidence = 1;
1353   } else if (OuterCoincidence == "no") {
1354     IslOuterCoincidence = 0;
1355   } else {
1356     errs() << "warning: Option -polly-opt-outer-coincidence should either be "
1357               "'yes' or 'no'. Falling back to default: 'no'\n";
1358     IslOuterCoincidence = 0;
1359   }
1360 
1361   isl_ctx *Ctx = S.getIslCtx();
1362 
1363   isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
1364   isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
1365   isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
1366   isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
1367   isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
1368   isl_options_set_tile_scale_tile_loops(Ctx, 0);
1369 
1370   auto OnErrorStatus = isl_options_get_on_error(Ctx);
1371   isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);
1372 
1373   isl_schedule_constraints *ScheduleConstraints;
1374   ScheduleConstraints = isl_schedule_constraints_on_domain(Domain);
1375   ScheduleConstraints =
1376       isl_schedule_constraints_set_proximity(ScheduleConstraints, Proximity);
1377   ScheduleConstraints = isl_schedule_constraints_set_validity(
1378       ScheduleConstraints, isl_union_map_copy(Validity));
1379   ScheduleConstraints =
1380       isl_schedule_constraints_set_coincidence(ScheduleConstraints, Validity);
1381   isl_schedule *Schedule;
1382   Schedule = isl_schedule_constraints_compute_schedule(ScheduleConstraints);
1383   isl_options_set_on_error(Ctx, OnErrorStatus);
1384 
  // In cases where the scheduler is not able to optimize the code, we just
  // do not touch the schedule.
1387   if (!Schedule)
1388     return false;
1389 
1390   DEBUG({
1391     auto *P = isl_printer_to_str(Ctx);
1392     P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
1393     P = isl_printer_print_schedule(P, Schedule);
1394     auto *str = isl_printer_get_str(P);
1395     dbgs() << "NewScheduleTree: \n" << str << "\n";
1396     free(str);
1397     isl_printer_free(P);
1398   });
1399 
1400   Function &F = S.getFunction();
1401   auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1402   const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)};
1403   isl_schedule *NewSchedule =
1404       ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI);
1405 
1406   if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule)) {
1407     isl_schedule_free(NewSchedule);
1408     return false;
1409   }
1410 
1411   S.setScheduleTree(NewSchedule);
1412   S.markAsOptimized();
1413 
1414   if (OptimizedScops)
1415     S.dump();
1416 
1417   return false;
1418 }
1419 
1420 void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const {
1421   isl_printer *p;
1422   char *ScheduleStr;
1423 
1424   OS << "Calculated schedule:\n";
1425 
1426   if (!LastSchedule) {
1427     OS << "n/a\n";
1428     return;
1429   }
1430 
1431   p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule));
1432   p = isl_printer_print_schedule(p, LastSchedule);
1433   ScheduleStr = isl_printer_get_str(p);
1434   isl_printer_free(p);
1435 
1436   OS << ScheduleStr << "\n";
1437 }
1438 
1439 void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const {
1440   ScopPass::getAnalysisUsage(AU);
1441   AU.addRequired<DependenceInfo>();
1442   AU.addRequired<TargetTransformInfoWrapperPass>();
1443 }
1444 
1445 Pass *polly::createIslScheduleOptimizerPass() {
1446   return new IslScheduleOptimizer();
1447 }
1448 
// Register the pass with LLVM's pass infrastructure under the command-line
// name "polly-opt-isl" and record the analyses it depends on
// (getAnalysisUsage above requires DependenceInfo and
// TargetTransformInfoWrapperPass), so the pass manager schedules them first.
INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl",
                      "Polly - Optimize schedule of SCoP", false, false);
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
                    "Polly - Optimize schedule of SCoP", false, false)
1456