//===- Schedule.cpp - Calculate an optimized schedule ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass generates an entirely new schedule tree from the data dependences
// and iteration domains. The new schedule tree is computed in two steps:
//
// 1) The isl scheduling optimizer is run
//
// The isl scheduling optimizer creates a new schedule tree that maximizes
// parallelism and tileability and minimizes data-dependence distances. The
// algorithm used is a modified version of the ``Pluto'' algorithm:
//
//   U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan.
//   A Practical Automatic Polyhedral Parallelizer and Locality Optimizer.
//   In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language
//   Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008.
//
// 2) A set of post-scheduling transformations is applied on the schedule tree.
//
// These optimizations include:
//
//  - Tiling of the innermost tilable bands
//  - Prevectorization - The choice of a possible outer loop that is strip-mined
//                       to the innermost level to enable inner-loop
//                       vectorization.
//  - Some optimizations for spatial locality are also planned.
//
// For a detailed description of the schedule tree itself please see section 6
// of:
//
// Polyhedral AST generation is more than scanning polyhedra
// Tobias Grosser, Sven Verdoolaege, Albert Cohen
// ACM Transactions on Programming Languages and Systems (TOPLAS),
// 37(4), July 2015
// http://www.grosser.es/#pub-polyhedral-AST-generation
//
// This publication also contains a detailed discussion of the different options
// for polyhedral loop unrolling, full/partial tile separation and other uses
// of the schedule tree.
//
//===----------------------------------------------------------------------===//

#include "polly/ScheduleOptimizer.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "isl/aff.h"
#include "isl/band.h"
#include "isl/constraint.h"
#include "isl/map.h"
#include "isl/options.h"
#include "isl/printer.h"
#include "isl/schedule.h"
#include "isl/schedule_node.h"
#include "isl/space.h"
#include "isl/union_map.h"
#include "isl/union_set.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-opt-isl"

// Restrict the scheduler to a certain class of dependences (e.g. only RAW).
static cl::opt<std::string>
    OptimizeDeps("polly-opt-optimize-only",
                 cl::desc("Only a certain kind of dependences (all/raw)"),
                 cl::Hidden, cl::init("all"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

static cl::opt<std::string>
    SimplifyDeps("polly-opt-simplify-deps",
                 cl::desc("Dependences should be simplified (yes/no)"),
                 cl::Hidden, cl::init("yes"), cl::ZeroOrMore,
                 cl::cat(PollyCategory));

static cl::opt<int> MaxConstantTerm(
    "polly-opt-max-constant-term",
    cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> MaxCoefficient(
    "polly-opt-max-coefficient",
    cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden,
    cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string> FusionStrategy(
    "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"),
    cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    MaximizeBandDepth("polly-opt-maximize-bands",
                      cl::desc("Maximize the band depth (yes/no)"), cl::Hidden,
                      cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string> OuterCoincidence(
    "polly-opt-outer-coincidence",
    cl::desc("Try to construct schedules where the outer member of each band "
             "satisfies the coincidence constraints (yes/no)"),
    cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> PrevectorWidth(
    "polly-prevect-width",
    cl::desc(
        "The number of loop iterations to strip-mine for pre-vectorization"),
    cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> FirstLevelTiling("polly-tiling",
                                      cl::desc("Enable loop tiling"),
                                      cl::init(true), cl::ZeroOrMore,
                                      cl::cat(PollyCategory));

// Latency/throughput of vector FMAs model the target for the matrix-multiply
// pattern optimization below.
static cl::opt<int> LatencyVectorFma(
    "polly-target-latency-vector-fma",
    cl::desc("The minimal number of cycles between issuing two "
             "dependent consecutive vector fused multiply-add "
             "instructions."),
    cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> ThroughputVectorFma(
    "polly-target-throughput-vector-fma",
    cl::desc("A throughput of the processor floating-point arithmetic units "
             "expressed in the number of vector fused multiply-add "
             "instructions per clock cycle."),
    cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory));
// This option, along with --polly-target-2nd-cache-level-associativity,
// --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size
// represent the parameters of the target cache, which do not have typical
// values that can be used by default. However, to apply the pattern matching
// optimizations, we use the values of the parameters of Intel Core i7-3820
// SandyBridge in case the parameters are not specified. Such an approach helps
// also to attain the high-performance on IBM POWER System S822 and IBM Power
// 730 Express server.
static cl::opt<int> FirstCacheLevelAssociativity(
    "polly-target-1st-cache-level-associativity",
    cl::desc("The associativity of the first cache level."), cl::Hidden,
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelAssociativity(
    "polly-target-2nd-cache-level-associativity",
    cl::desc("The associativity of the second cache level."), cl::Hidden,
    cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstCacheLevelSize(
    "polly-target-1st-cache-level-size",
    cl::desc("The size of the first cache level specified in bytes."),
    cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondCacheLevelSize(
    "polly-target-2nd-cache-level-size",
    cl::desc("The size of the second level specified in bytes."), cl::Hidden,
    cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> VectorRegisterBitwidth(
    "polly-target-vector-register-bitwidth",
    cl::desc("The size in bits of a vector register (if not set, this "
             "information is taken from LLVM's target information."),
    cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> FirstLevelDefaultTileSize(
    "polly-default-tile-size",
    cl::desc("The default tile size (if not enough were provided by"
             " --polly-tile-sizes)"),
    cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    FirstLevelTileSizes("polly-tile-sizes",
                        cl::desc("A tile size for each loop dimension, filled "
                                 "with --polly-default-tile-size"),
                        cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                        cl::cat(PollyCategory));

static cl::opt<bool>
    SecondLevelTiling("polly-2nd-level-tiling",
                      cl::desc("Enable a 2nd level loop of loop tiling"),
                      cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> SecondLevelDefaultTileSize(
    "polly-2nd-level-default-tile-size",
    cl::desc("The default 2nd-level tile size (if not enough were provided by"
             " --polly-2nd-level-tile-sizes)"),
    cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    SecondLevelTileSizes("polly-2nd-level-tile-sizes",
                         cl::desc("A tile size for each loop dimension, filled "
                                  "with --polly-default-tile-size"),
                         cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                         cl::cat(PollyCategory));

static cl::opt<bool> RegisterTiling("polly-register-tiling",
                                    cl::desc("Enable register tiling"),
                                    cl::init(false), cl::ZeroOrMore,
                                    cl::cat(PollyCategory));

static cl::opt<int> RegisterDefaultTileSize(
    "polly-register-tiling-default-tile-size",
    cl::desc("The default register tile size (if not enough were provided by"
             " --polly-register-tile-sizes)"),
    cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int> PollyPatternMatchingNcQuotient(
    "polly-pattern-matching-nc-quotient",
    cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
             "macro-kernel, by Nr, the parameter of the micro-kernel"),
    cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::list<int>
    RegisterTileSizes("polly-register-tile-sizes",
                      cl::desc("A tile size for each loop dimension, filled "
                               "with --polly-register-tile-size"),
                      cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated,
                      cl::cat(PollyCategory));

static cl::opt<bool>
    PMBasedOpts("polly-pattern-matching-based-opts",
                cl::desc("Perform optimizations based on pattern matching"),
                cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> OptimizedScops(
    "polly-optimized-scops",
    cl::desc("Polly - Dump polyhedral description of Scops optimized with "
             "the isl scheduling optimizer and the set of post-scheduling "
             "transformations is applied on the schedule tree"),
    cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

/// Create an isl_union_set, which describes the isolate option based on
/// IsolateDomain.
///
/// @param IsolateDomain An isl_set whose @p OutDimsNum last dimensions should
///                      belong to the current band node.
/// @param OutDimsNum    A number of dimensions that should belong to
///                      the current band node.
static __isl_give isl_union_set *
getIsolateOptions(__isl_take isl_set *IsolateDomain, unsigned OutDimsNum) {
  auto Dims = isl_set_dim(IsolateDomain, isl_dim_set);
  assert(OutDimsNum <= Dims &&
         "The isl_set IsolateDomain is used to describe the range of schedule "
         "dimensions values, which should be isolated. Consequently, the "
         "number of its dimensions should be greater than or equal to the "
         "number of the schedule dimensions.");
  // Wrap the isolate domain into a relation [prefix] -> [last OutDimsNum dims]
  // and tag it with the "isolate" id understood by the isl AST generator.
  auto *IsolateRelation = isl_map_from_domain(IsolateDomain);
  IsolateRelation =
      isl_map_move_dims(IsolateRelation, isl_dim_out, 0, isl_dim_in,
                        Dims - OutDimsNum, OutDimsNum);
  auto *IsolateOption = isl_map_wrap(IsolateRelation);
  auto *Id = isl_id_alloc(isl_set_get_ctx(IsolateOption), "isolate", nullptr);
  return isl_union_set_from_set(isl_set_set_tuple_id(IsolateOption, Id));
}

/// Create an isl_union_set, which describes the atomic option for the dimension
/// of the current node.
///
/// It may help to reduce the size of generated code.
///
/// @param Ctx An isl_ctx, which is used to create the isl_union_set.
static __isl_give isl_union_set *getAtomicOptions(isl_ctx *Ctx) {
  // A universe 1-D set tagged "atomic" applies to the single band dimension.
  auto *Space = isl_space_set_alloc(Ctx, 0, 1);
  auto *AtomicOption = isl_set_universe(Space);
  auto *Id = isl_id_alloc(Ctx, "atomic", nullptr);
  return isl_union_set_from_set(isl_set_set_tuple_id(AtomicOption, Id));
}

/// Create an isl_union_set, which describes the option of the form
/// [isolate[] -> unroll[x]].
///
/// @param Ctx An isl_ctx, which is used to create the isl_union_set.
static __isl_give isl_union_set *getUnrollIsolatedSetOptions(isl_ctx *Ctx) {
  auto *Space = isl_space_alloc(Ctx, 0, 0, 1);
  auto *UnrollIsolatedSetOption = isl_map_universe(Space);
  auto *DimInId = isl_id_alloc(Ctx, "isolate", nullptr);
  auto *DimOutId = isl_id_alloc(Ctx, "unroll", nullptr);
  UnrollIsolatedSetOption =
      isl_map_set_tuple_id(UnrollIsolatedSetOption, isl_dim_in, DimInId);
  UnrollIsolatedSetOption =
      isl_map_set_tuple_id(UnrollIsolatedSetOption, isl_dim_out, DimOutId);
  return isl_union_set_from_set(isl_map_wrap(UnrollIsolatedSetOption));
}

/// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
///
/// @param Set         A set, which should be modified.
/// @param VectorWidth A parameter, which determines the constraint.
static __isl_give isl_set *addExtentConstraints(__isl_take isl_set *Set,
                                                int VectorWidth) {
  auto Dims = isl_set_dim(Set, isl_dim_set);
  auto Space = isl_set_get_space(Set);
  auto *LocalSpace = isl_local_space_from_space(Space);
  // Lower bound: last dimension >= 0.
  auto *ExtConstr =
      isl_constraint_alloc_inequality(isl_local_space_copy(LocalSpace));
  ExtConstr = isl_constraint_set_constant_si(ExtConstr, 0);
  ExtConstr =
      isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, 1);
  Set = isl_set_add_constraint(Set, ExtConstr);
  // Upper bound: last dimension <= VectorWidth - 1.
  ExtConstr = isl_constraint_alloc_inequality(LocalSpace);
  ExtConstr = isl_constraint_set_constant_si(ExtConstr, VectorWidth - 1);
  ExtConstr =
      isl_constraint_set_coefficient_si(ExtConstr, isl_dim_set, Dims - 1, -1);
  return isl_set_add_constraint(Set, ExtConstr);
}

/// Build the desired set of partial tile prefixes.
///
/// We build a set of partial tile prefixes, which are prefixes of the vector
/// loop that have exactly VectorWidth iterations.
///
/// 1. Get all prefixes of the vector loop.
/// 2. Extend it to a set, which has exactly VectorWidth iterations for
///    any prefix from the set that was built on the previous step.
/// 3. Subtract loop domain from it, project out the vector loop dimension and
///    get a set of prefixes, which don't have exactly VectorWidth iterations.
/// 4. Subtract it from all prefixes of the vector loop and get the desired
///    set.
///
/// @param ScheduleRange A range of a map, which describes a prefix schedule
///                      relation.
static __isl_give isl_set *
getPartialTilePrefixes(__isl_take isl_set *ScheduleRange, int VectorWidth) {
  auto Dims = isl_set_dim(ScheduleRange, isl_dim_set);
  // Step 1: all prefixes of the vector loop (drop the innermost dimension).
  auto *LoopPrefixes = isl_set_project_out(isl_set_copy(ScheduleRange),
                                           isl_dim_set, Dims - 1, 1);
  // Step 2: extend each prefix by exactly VectorWidth iterations.
  auto *ExtentPrefixes =
      isl_set_add_dims(isl_set_copy(LoopPrefixes), isl_dim_set, 1);
  ExtentPrefixes = addExtentConstraints(ExtentPrefixes, VectorWidth);
  // Step 3: prefixes that do NOT have exactly VectorWidth iterations.
  auto *BadPrefixes = isl_set_subtract(ExtentPrefixes, ScheduleRange);
  BadPrefixes = isl_set_project_out(BadPrefixes, isl_dim_set, Dims - 1, 1);
  // Step 4: keep only the full-tile prefixes.
  return isl_set_subtract(LoopPrefixes, BadPrefixes);
}

/// Isolate the full tiles of the band @p Node that was strip-mined with
/// @p VectorWidth, by attaching "isolate" and "atomic" AST build options.
///
/// @param Node        The band node to be modified (asserted to be a band).
/// @param VectorWidth The number of iterations of a full tile.
__isl_give isl_schedule_node *ScheduleTreeOptimizer::isolateFullPartialTiles(
    __isl_take isl_schedule_node *Node, int VectorWidth) {
  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);
  // Descend two levels to compute the prefix schedule of the point loop.
  Node = isl_schedule_node_child(Node, 0);
  Node = isl_schedule_node_child(Node, 0);
  auto *SchedRelUMap = isl_schedule_node_get_prefix_schedule_relation(Node);
  auto *ScheduleRelation = isl_map_from_union_map(SchedRelUMap);
  auto *ScheduleRange = isl_map_range(ScheduleRelation);
  auto *IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
  auto *AtomicOption = getAtomicOptions(isl_set_get_ctx(IsolateDomain));
  auto *IsolateOption = getIsolateOptions(IsolateDomain, 1);
  // Return to the original band before setting its options.
  Node = isl_schedule_node_parent(Node);
  Node = isl_schedule_node_parent(Node);
  auto *Options = isl_union_set_union(IsolateOption, AtomicOption);
  Node = isl_schedule_node_band_set_ast_build_options(Node, Options);
  return Node;
}

/// Strip-mine dimension @p DimToVectorize of the band @p Node by
/// @p VectorWidth to enable inner-loop vectorization, and mark the
/// resulting point loop with a "SIMD" mark node.
///
/// @param Node           The band node (asserted to be a band).
/// @param DimToVectorize The schedule dimension to strip-mine.
/// @param VectorWidth    The strip-mine (vector) width.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::prevectSchedBand(__isl_take isl_schedule_node *Node,
                                        unsigned DimToVectorize,
                                        int VectorWidth) {
  assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band);

  auto Space = isl_schedule_node_band_get_space(Node);
  auto ScheduleDimensions = isl_space_dim(Space, isl_dim_set);
  isl_space_free(Space);
  assert(DimToVectorize < ScheduleDimensions);

  // Split the band so that the dimension to vectorize forms its own
  // single-member band.
  if (DimToVectorize > 0) {
    Node = isl_schedule_node_band_split(Node, DimToVectorize);
    Node = isl_schedule_node_child(Node, 0);
  }
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl_schedule_node_band_split(Node, 1);
  Space = isl_schedule_node_band_get_space(Node);
  auto Sizes = isl_multi_val_zero(Space);
  auto Ctx = isl_schedule_node_get_ctx(Node);
  Sizes =
      isl_multi_val_set_val(Sizes, 0, isl_val_int_from_si(Ctx, VectorWidth));
  Node = isl_schedule_node_band_tile(Node, Sizes);
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = isl_schedule_node_child(Node, 0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = isl_schedule_node_band_set_ast_build_options(
      Node, isl_union_set_read_from_str(Ctx, "{ unroll[x]: 1 = 0 }"));
  Node = isl_schedule_node_band_sink(Node);
  Node = isl_schedule_node_child(Node, 0);
  if (isl_schedule_node_get_type(Node) == isl_schedule_node_leaf)
    Node = isl_schedule_node_parent(Node);
  isl_id *LoopMarker = isl_id_alloc(Ctx, "SIMD", nullptr);
  Node = isl_schedule_node_insert_mark(Node, LoopMarker);
  return Node;
}

/// Tile the band @p Node with the given tile sizes and surround the tile and
/// point bands with mark nodes "<Identifier> - Tiles" / "<Identifier> - Points".
///
/// @param Node            The band node to tile.
/// @param Identifier      A string used to build the mark-node names.
/// @param TileSizes       Per-dimension tile sizes; missing entries are
///                        filled with @p DefaultTileSize.
/// @param DefaultTileSize Fallback tile size.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::tileNode(__isl_take isl_schedule_node *Node,
                                const char *Identifier, ArrayRef<int> TileSizes,
                                int DefaultTileSize) {
  auto Ctx = isl_schedule_node_get_ctx(Node);
  auto Space = isl_schedule_node_band_get_space(Node);
  auto Dims = isl_space_dim(Space, isl_dim_set);
  auto Sizes = isl_multi_val_zero(Space);
  std::string IdentifierString(Identifier);
  for (unsigned i = 0; i < Dims; i++) {
    auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
    Sizes = isl_multi_val_set_val(Sizes, i, isl_val_int_from_si(Ctx, tileSize));
  }
  auto TileLoopMarkerStr = IdentifierString + " - Tiles";
  isl_id *TileLoopMarker =
      isl_id_alloc(Ctx, TileLoopMarkerStr.c_str(), nullptr);
  Node = isl_schedule_node_insert_mark(Node, TileLoopMarker);
  Node = isl_schedule_node_child(Node, 0);
  Node = isl_schedule_node_band_tile(Node, Sizes);
  Node = isl_schedule_node_child(Node, 0);
  auto PointLoopMarkerStr = IdentifierString + " - Points";
  isl_id *PointLoopMarker =
      isl_id_alloc(Ctx, PointLoopMarkerStr.c_str(), nullptr);
  Node = isl_schedule_node_insert_mark(Node, PointLoopMarker);
  // Return the point band, so follow-up transformations apply to it.
  Node = isl_schedule_node_child(Node, 0);
  return Node;
}

/// Tile the band @p Node for register reuse and request full unrolling of the
/// resulting point loops via the "{unroll[x]}" AST build option.
///
/// @param Node            The band node to tile.
/// @param TileSizes       Per-dimension register-tile sizes.
/// @param DefaultTileSize Fallback tile size.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::applyRegisterTiling(__isl_take isl_schedule_node *Node,
                                           llvm::ArrayRef<int> TileSizes,
                                           int DefaultTileSize) {
  auto *Ctx = isl_schedule_node_get_ctx(Node);
  Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
  Node = isl_schedule_node_band_set_ast_build_options(
      Node, isl_union_set_read_from_str(Ctx, "{unroll[x]}"));
  return Node;
}

/// Check whether @p Node is a band that can be tiled: a permutable band with
/// more than one dimension whose only child is a leaf.
bool ScheduleTreeOptimizer::isTileableBandNode(
    __isl_keep isl_schedule_node *Node) {
  if (isl_schedule_node_get_type(Node) != isl_schedule_node_band)
    return false;

  if (isl_schedule_node_n_children(Node) != 1)
    return false;

  if (!isl_schedule_node_band_get_permutable(Node))
    return false;

  auto Space = isl_schedule_node_band_get_space(Node);
  auto Dims = isl_space_dim(Space, isl_dim_set);
  isl_space_free(Space);

  // Tiling a single-dimension band gains nothing.
  if (Dims <= 1)
    return false;

  auto Child = isl_schedule_node_get_child(Node, 0);
  auto Type = isl_schedule_node_get_type(Child);
  isl_schedule_node_free(Child);

  if (Type != isl_schedule_node_leaf)
    return false;

  return true;
}

/// Apply the standard band optimizations: optional 1st/2nd-level and register
/// tiling, then prevectorize the innermost coincident band member.
__isl_give isl_schedule_node *
ScheduleTreeOptimizer::standardBandOpts(__isl_take isl_schedule_node *Node,
                                        void *User) {
  if (FirstLevelTiling)
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
                    FirstLevelDefaultTileSize);

  if (SecondLevelTiling)
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
                    SecondLevelDefaultTileSize);

  if (RegisterTiling)
    Node =
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);

  if (PollyVectorizerChoice == VECTORIZER_NONE)
    return Node;

  auto Space = isl_schedule_node_band_get_space(Node);
  auto Dims = isl_space_dim(Space, isl_dim_set);
  isl_space_free(Space);

  // Prevectorize the innermost coincident (parallel) dimension only.
  for (int i = Dims - 1; i >= 0; i--)
    if (isl_schedule_node_band_member_get_coincident(Node, i)) {
      Node = prevectSchedBand(Node, i, PrevectorWidth);
      break;
    }

  return Node;
}

/// Get the position of a dimension with a non-zero coefficient.
///
/// Check that isl constraint @p Constraint has only one non-zero
/// coefficient for dimensions that have type @p DimType. If this is true,
/// return the position of the dimension corresponding to the non-zero
/// coefficient and negative value, otherwise.
///
/// @param Constraint The isl constraint to be checked.
/// @param DimType    The type of the dimensions.
/// @return           The position of the dimension in case the isl
///                   constraint satisfies the requirements, a negative
///                   value, otherwise.
static int getMatMulConstraintDim(__isl_keep isl_constraint *Constraint,
                                  enum isl_dim_type DimType) {
  int DimPos = -1;
  auto *LocalSpace = isl_constraint_get_local_space(Constraint);
  int LocalSpaceDimNum = isl_local_space_dim(LocalSpace, DimType);
  for (int i = 0; i < LocalSpaceDimNum; i++) {
    auto *Val = isl_constraint_get_coefficient_val(Constraint, DimType, i);
    if (isl_val_is_zero(Val)) {
      isl_val_free(Val);
      continue;
    }
    // Reject a second non-zero coefficient, or one that is not +1 for output
    // dimensions / -1 for input dimensions.
    if (DimPos >= 0 || (DimType == isl_dim_out && !isl_val_is_one(Val)) ||
        (DimType == isl_dim_in && !isl_val_is_negone(Val))) {
      isl_val_free(Val);
      isl_local_space_free(LocalSpace);
      return -1;
    }
    DimPos = i;
    isl_val_free(Val);
  }
  isl_local_space_free(LocalSpace);
  return DimPos;
}

/// Check the form of the isl constraint.
///
/// Check that the @p DimInPos input dimension of the isl constraint
/// @p Constraint has a coefficient that is equal to negative one, the @p
/// DimOutPos has a coefficient that is equal to one and others
/// have coefficients equal to zero.
///
/// @param Constraint The isl constraint to be checked.
/// @param DimInPos   The input dimension of the isl constraint.
/// @param DimOutPos  The output dimension of the isl constraint.
/// @return           isl_stat_ok in case the isl constraint satisfies
///                   the requirements, isl_stat_error otherwise.
static isl_stat isMatMulOperandConstraint(__isl_keep isl_constraint *Constraint,
                                          int &DimInPos, int &DimOutPos) {
  // Only homogeneous equalities (constant term zero) can describe the
  // index expressions of a matrix-multiplication operand.
  auto *Val = isl_constraint_get_constant_val(Constraint);
  if (!isl_constraint_is_equality(Constraint) || !isl_val_is_zero(Val)) {
    isl_val_free(Val);
    return isl_stat_error;
  }
  isl_val_free(Val);
  DimInPos = getMatMulConstraintDim(Constraint, isl_dim_in);
  if (DimInPos < 0)
    return isl_stat_error;
  DimOutPos = getMatMulConstraintDim(Constraint, isl_dim_out);
  if (DimOutPos < 0)
    return isl_stat_error;
  return isl_stat_ok;
}

/// Check that the access relation corresponds to a non-constant operand
/// of the matrix multiplication.
///
/// Access relations that correspond to non-constant operands of the matrix
/// multiplication depend only on two input dimensions and have two output
/// dimensions. The function checks that the isl basic map @p bmap satisfies
/// the requirements. The two input dimensions can be specified via @p user
/// array.
///
/// @param bmap The isl basic map to be checked.
/// @param user The input dimensions of @p bmap.
/// @return     isl_stat_ok in case isl basic map satisfies the requirements,
///             isl_stat_error otherwise.
static isl_stat isMatMulOperandBasicMap(__isl_take isl_basic_map *bmap,
                                        void *user) {
  auto *Constraints = isl_basic_map_get_constraint_list(bmap);
  isl_basic_map_free(bmap);
  if (isl_constraint_list_n_constraint(Constraints) != 2) {
    isl_constraint_list_free(Constraints);
    return isl_stat_error;
  }
  int InPosPair[] = {-1, -1};
  auto DimInPos = user ? static_cast<int *>(user) : InPosPair;
  for (int i = 0; i < 2; i++) {
    auto *Constraint = isl_constraint_list_get_constraint(Constraints, i);
    int InPos, OutPos;
    // Each of the two constraints must map a distinct input dimension onto a
    // distinct output dimension (OutPos in {0, 1}), consistently with any
    // positions already recorded in DimInPos.
    if (isMatMulOperandConstraint(Constraint, InPos, OutPos) ==
            isl_stat_error ||
        OutPos > 1 || (DimInPos[OutPos] >= 0 && DimInPos[OutPos] != InPos)) {
      isl_constraint_free(Constraint);
      isl_constraint_list_free(Constraints);
      return isl_stat_error;
    }
    DimInPos[OutPos] = InPos;
    isl_constraint_free(Constraint);
  }
  isl_constraint_list_free(Constraints);
  return isl_stat_ok;
}

/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return        The modified map.
// NOTE(review): file-local helper with external linkage; presumably it should
// be declared static like its siblings — TODO confirm no other TU uses it.
__isl_give isl_map *permuteDimensions(__isl_take isl_map *Map,
                                      enum isl_dim_type DimType,
                                      unsigned DstPos, unsigned SrcPos) {
  assert(DstPos < isl_map_dim(Map, DimType) &&
         SrcPos < isl_map_dim(Map, DimType));
  if (DstPos == SrcPos)
    return Map;
  // Moving dimensions drops tuple ids; remember them so they can be restored.
  isl_id *DimId = nullptr;
  if (isl_map_has_tuple_id(Map, DimType))
    DimId = isl_map_get_tuple_id(Map, DimType);
  auto FreeDim = DimType == isl_dim_in ? isl_dim_out : isl_dim_in;
  isl_id *FreeDimId = nullptr;
  if (isl_map_has_tuple_id(Map, FreeDim))
    FreeDimId = isl_map_get_tuple_id(Map, FreeDim);
  // Swap via the opposite tuple: park both dimensions there, then move them
  // back in exchanged order.
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  Map = isl_map_move_dims(Map, FreeDim, 0, DimType, MaxDim, 1);
  Map = isl_map_move_dims(Map, FreeDim, 0, DimType, MinDim, 1);
  Map = isl_map_move_dims(Map, DimType, MinDim, FreeDim, 1, 1);
  Map = isl_map_move_dims(Map, DimType, MaxDim, FreeDim, 0, 1);
  if (DimId)
    Map = isl_map_set_tuple_id(Map, DimType, DimId);
  if (FreeDimId)
    Map = isl_map_set_tuple_id(Map, FreeDim, FreeDimId);
  return Map;
}

/// Check the form of the access relation.
///
/// Check that the access relation @p AccMap has the form M[i][j], where i
/// is a @p FirstPos and j is a @p SecondPos.
///
/// @param AccMap    The access relation to be checked.
/// @param FirstPos  The index of the input dimension that is mapped to
///                  the first output dimension.
/// @param SecondPos The index of the input dimension that is mapped to the
///                  second output dimension.
/// @return          True in case @p AccMap has the expected form and false,
///                  otherwise.
static bool isMatMulOperandAcc(__isl_keep isl_map *AccMap, int &FirstPos,
                               int &SecondPos) {
  int DimInPos[] = {FirstPos, SecondPos};
  if (isl_map_foreach_basic_map(AccMap, isMatMulOperandBasicMap,
                                static_cast<void *>(DimInPos)) != isl_stat_ok ||
      DimInPos[0] < 0 || DimInPos[1] < 0)
    return false;
  FirstPos = DimInPos[0];
  SecondPos = DimInPos[1];
  return true;
}

/// Does the memory access represent a non-scalar operand of the matrix
/// multiplication.
///
/// Check that the memory access @p MemAccess is the read access to a non-scalar
/// operand of the matrix multiplication or its result.
///
/// @param MemAccess The memory access to be checked.
/// @param MMI       Parameters of the matrix multiplication operands.
/// @return          True in case the memory access represents the read access
///                  to a non-scalar operand of the matrix multiplication and
///                  false, otherwise.
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
                                        MatMulInfoTy &MMI) {
  if (!MemAccess->isArrayKind() || !MemAccess->isRead())
    return false;
  isl_map *AccMap = MemAccess->getAccessRelation();
  // Classify the access by which pair of loop dimensions it uses:
  // C[i][j], A[i][k] or B[k][j]; each operand may be matched only once.
  if (isMatMulOperandAcc(AccMap, MMI.i, MMI.j) && !MMI.ReadFromC &&
      isl_map_n_basic_map(AccMap) == 1) {
    MMI.ReadFromC = MemAccess;
    isl_map_free(AccMap);
    return true;
  }
  if (isMatMulOperandAcc(AccMap, MMI.i, MMI.k) && !MMI.A &&
      isl_map_n_basic_map(AccMap) == 1) {
    MMI.A = MemAccess;
    isl_map_free(AccMap);
    return true;
  }
  if (isMatMulOperandAcc(AccMap, MMI.k, MMI.j) && !MMI.B &&
      isl_map_n_basic_map(AccMap) == 1) {
    MMI.B = MemAccess;
    isl_map_free(AccMap);
    return true;
  }
  isl_map_free(AccMap);
  return false;
}

/// Check accesses to operands of the matrix multiplication.
///
/// Check that accesses of the SCoP statement, which corresponds to
/// the partial schedule @p PartialSchedule, are scalar in terms of loops
/// containing the matrix multiplication, in case they do not represent
/// accesses to the non-scalar operands of the matrix multiplication or
/// its result.
///
/// @param PartialSchedule The partial schedule of the SCoP statement.
/// @param MMI             Parameters of the matrix multiplication operands.
/// @return                True in case the corresponding SCoP statement
///                        represents matrix multiplication and false,
///                        otherwise.
static bool containsOnlyMatrMultAcc(__isl_keep isl_map *PartialSchedule,
                                    MatMulInfoTy &MMI) {
  // The in-tuple id of the partial schedule carries the ScopStmt pointer.
  auto *InputDimId = isl_map_get_tuple_id(PartialSchedule, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimId));
  isl_id_free(InputDimId);
  unsigned OutDimNum = isl_map_dim(PartialSchedule, isl_dim_out);
  assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  // Schedules with the i, j and k dimensions moved innermost, used to test
  // that the remaining accesses have stride zero in those loops.
  auto *MapI = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.i, OutDimNum - 1);
  auto *MapJ = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.j, OutDimNum - 1);
  auto *MapK = permuteDimensions(isl_map_copy(PartialSchedule), isl_dim_out,
                                 MMI.k, OutDimNum - 1);
  for (auto *MemA = Stmt->begin(); MemA != Stmt->end() - 1; MemA++) {
    auto *MemAccessPtr = *MemA;
    if (MemAccessPtr->isArrayKind() && MemAccessPtr != MMI.WriteToC &&
        !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
        !(MemAccessPtr->isStrideZero(isl_map_copy(MapI)) &&
          MemAccessPtr->isStrideZero(isl_map_copy(MapJ)) &&
          MemAccessPtr->isStrideZero(isl_map_copy(MapK)))) {
      isl_map_free(MapI);
      isl_map_free(MapJ);
      isl_map_free(MapK);
      return false;
    }
  }
  isl_map_free(MapI);
  isl_map_free(MapJ);
  isl_map_free(MapK);
  return true;
}

/// Check for dependencies corresponding to the matrix multiplication.
///
/// Check that there is only true dependence of the form
/// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
/// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
/// to the dependency produced by the matrix multiplication.
///
/// @param Schedule The schedule of the SCoP statement.
/// @param D        The SCoP dependencies.
/// @param Pos      The parameter to describe an acceptable true dependence.
756 /// In case it has a negative value, try to determine its 757 /// acceptable value. 758 /// @return True in case dependencies correspond to the matrix multiplication 759 /// and false, otherwise. 760 static bool containsOnlyMatMulDep(__isl_keep isl_map *Schedule, 761 const Dependences *D, int &Pos) { 762 auto *WAR = D->getDependences(Dependences::TYPE_WAR); 763 if (!isl_union_map_is_empty(WAR)) { 764 isl_union_map_free(WAR); 765 return false; 766 } 767 isl_union_map_free(WAR); 768 auto *Dep = D->getDependences(Dependences::TYPE_RAW); 769 auto *Red = D->getDependences(Dependences::TYPE_RED); 770 if (Red) 771 Dep = isl_union_map_union(Dep, Red); 772 auto *DomainSpace = isl_space_domain(isl_map_get_space(Schedule)); 773 auto *Space = isl_space_map_from_domain_and_range(isl_space_copy(DomainSpace), 774 DomainSpace); 775 auto *Deltas = isl_map_deltas(isl_union_map_extract_map(Dep, Space)); 776 isl_union_map_free(Dep); 777 int DeltasDimNum = isl_set_dim(Deltas, isl_dim_set); 778 isl_set_free(Deltas); 779 for (int i = 0; i < DeltasDimNum; i++) { 780 auto *Val = isl_set_plain_get_val_if_fixed(Deltas, isl_dim_set, i); 781 Pos = Pos < 0 && isl_val_is_one(Val) ? i : Pos; 782 if (isl_val_is_nan(Val) || 783 !(isl_val_is_zero(Val) || (i == Pos && isl_val_is_one(Val)))) { 784 isl_val_free(Val); 785 return false; 786 } 787 isl_val_free(Val); 788 } 789 if (DeltasDimNum == 0 || Pos < 0) 790 return false; 791 return true; 792 } 793 794 /// Check if the SCoP statement could probably be optimized with analytical 795 /// modeling. 796 /// 797 /// containsMatrMult tries to determine whether the following conditions 798 /// are true: 799 /// 1. The last memory access modeling an array, MA1, represents writing to 800 /// memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or 801 /// S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement 802 /// under consideration. 803 /// 2. 
///    There is only one loop-carried true dependency, and it has the
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
///    loop-carried or anti dependencies.
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
///    and all memory accesses of the SCoP that are different from MA1, MA2,
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
///    of loops i1, i2 and i3.
///
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
///        to check.
/// @param D The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands.
static bool containsMatrMult(__isl_keep isl_map *PartialSchedule,
                             const Dependences *D, MatMulInfoTy &MMI) {
  // Recover the ScopStmt from the user pointer of the input tuple id.
  auto *InputDimsId = isl_map_get_tuple_id(PartialSchedule, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimsId));
  isl_id_free(InputDimsId);
  if (Stmt->size() <= 1)
    return false;
  // Scan the accesses backwards; the last array access must be the single
  // basic-map write of the result C(i, j). Scalar accesses are skipped.
  // NOTE(review): the loop condition never inspects Stmt->begin() itself —
  // this mirrors the guard Stmt->size() <= 1 above, but confirm it is
  // intentional.
  for (auto *MemA = Stmt->end() - 1; MemA != Stmt->begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto *AccMap = MemAccessPtr->getAccessRelation();
    if (isl_map_n_basic_map(AccMap) != 1 ||
        !isMatMulOperandAcc(AccMap, MMI.i, MMI.j)) {
      isl_map_free(AccMap);
      return false;
    }
    isl_map_free(AccMap);
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // Condition 2: only the k-loop-carried flow dependence may exist.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // Condition 3: all remaining accesses are matmul operands or invariant.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}

/// Permute two dimensions of the band node.
853 /// 854 /// Permute FirstDim and SecondDim dimensions of the Node. 855 /// 856 /// @param Node The band node to be modified. 857 /// @param FirstDim The first dimension to be permuted. 858 /// @param SecondDim The second dimension to be permuted. 859 static __isl_give isl_schedule_node * 860 permuteBandNodeDimensions(__isl_take isl_schedule_node *Node, unsigned FirstDim, 861 unsigned SecondDim) { 862 assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band && 863 isl_schedule_node_band_n_member(Node) > std::max(FirstDim, SecondDim)); 864 auto PartialSchedule = isl_schedule_node_band_get_partial_schedule(Node); 865 auto PartialScheduleFirstDim = 866 isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, FirstDim); 867 auto PartialScheduleSecondDim = 868 isl_multi_union_pw_aff_get_union_pw_aff(PartialSchedule, SecondDim); 869 PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff( 870 PartialSchedule, SecondDim, PartialScheduleFirstDim); 871 PartialSchedule = isl_multi_union_pw_aff_set_union_pw_aff( 872 PartialSchedule, FirstDim, PartialScheduleSecondDim); 873 Node = isl_schedule_node_delete(Node); 874 Node = isl_schedule_node_insert_partial_schedule(Node, PartialSchedule); 875 return Node; 876 } 877 878 __isl_give isl_schedule_node *ScheduleTreeOptimizer::createMicroKernel( 879 __isl_take isl_schedule_node *Node, MicroKernelParamsTy MicroKernelParams) { 880 applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr}, 1); 881 Node = isl_schedule_node_parent(isl_schedule_node_parent(Node)); 882 Node = permuteBandNodeDimensions(Node, 0, 1); 883 return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0); 884 } 885 886 __isl_give isl_schedule_node *ScheduleTreeOptimizer::createMacroKernel( 887 __isl_take isl_schedule_node *Node, MacroKernelParamsTy MacroKernelParams) { 888 assert(isl_schedule_node_get_type(Node) == isl_schedule_node_band); 889 if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 && 890 MacroKernelParams.Kc == 
1) 891 return Node; 892 int DimOutNum = isl_schedule_node_band_n_member(Node); 893 std::vector<int> TileSizes(DimOutNum, 1); 894 TileSizes[DimOutNum - 3] = MacroKernelParams.Mc; 895 TileSizes[DimOutNum - 2] = MacroKernelParams.Nc; 896 TileSizes[DimOutNum - 1] = MacroKernelParams.Kc; 897 Node = tileNode(Node, "1st level tiling", TileSizes, 1); 898 Node = isl_schedule_node_parent(isl_schedule_node_parent(Node)); 899 Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1); 900 Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1); 901 return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0); 902 } 903 904 /// Get the size of the widest type of the matrix multiplication operands 905 /// in bytes, including alignment padding. 906 /// 907 /// @param MMI Parameters of the matrix multiplication operands. 908 /// @return The size of the widest type of the matrix multiplication operands 909 /// in bytes, including alignment padding. 910 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) { 911 auto *S = MMI.A->getStatement()->getParent(); 912 auto &DL = S->getFunction().getParent()->getDataLayout(); 913 auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType()); 914 auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType()); 915 auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType()); 916 return std::max({ElementSizeA, ElementSizeB, ElementSizeC}); 917 } 918 919 /// Get the size of the widest type of the matrix multiplication operands 920 /// in bits. 921 /// 922 /// @param MMI Parameters of the matrix multiplication operands. 923 /// @return The size of the widest type of the matrix multiplication operands 924 /// in bits. 
static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) {
  auto *S = MMI.A->getStatement()->getParent();
  auto &DL = S->getFunction().getParent()->getDataLayout();
  // The widest of the three operand element types determines how many
  // elements fit into a vector register.
  auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
  auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
  auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
  return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
}

/// Get parameters of the BLIS micro kernel.
///
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
/// such that no stalls caused by the combination of latencies and dependencies
/// are introduced during the updates of the resulting matrix of the matrix
/// multiplication. However, they should also be as small as possible to
/// release more registers for entries of multiplied matrices.
///
/// @param TTI Target Transform Info.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MicroKernelParamsTy.
/// @see MicroKernelParamsTy
static struct MicroKernelParamsTy
getMicroKernelParams(const llvm::TargetTransformInfo *TTI, MatMulInfoTy MMI) {
  assert(TTI && "The target transform info should be provided.");

  // Nvec - Number of double-precision floating-point numbers that can be held
  // by a vector register. Use 2 by default.
  // A negative option value means "not set"; fall back to the bit width
  // reported by the target.
  long RegisterBitwidth = VectorRegisterBitwidth;

  if (RegisterBitwidth == -1)
    RegisterBitwidth = TTI->getRegisterBitWidth(true);
  auto ElementSize = getMatMulTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  auto Nvec = RegisterBitwidth / ElementSize;
  if (Nvec == 0)
    Nvec = 2;
  // Size the register tile so latency * throughput of the FMA units is
  // covered; Nr is rounded up to a multiple of the vector width Nvec.
  // NOTE(review): formulas presumably follow the BLIS analytical model
  // (TOMS-BLIS-Analytical) — confirm against the paper referenced below.
  int Nr =
      ceil(sqrt(Nvec * LatencyVectorFma * ThroughputVectorFma) / Nvec) * Nvec;
  int Mr = ceil(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr);
  return {Mr, Nr};
}

/// Get parameters of the BLIS macro kernel.
///
/// During the computation of matrix multiplication, blocks of partitioned
/// matrices are mapped to different layers of the memory hierarchy.
/// To optimize data reuse, blocks should be ideally kept in cache between
/// iterations. Since parameters of the macro kernel determine sizes of these
/// blocks, there are upper and lower bounds on these parameters.
///
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MacroKernelParamsTy.
/// @see MacroKernelParamsTy
/// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  // Without valid micro-kernel parameters and cache information fall back
  // to the neutral parameters {1, 1, 1}, which disable macro-kernel tiling.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car: number of L1 cache sets available for a block of A (analytical
  // model of the BLIS paper referenced above).
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));
  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;
  return {Mc, Nc, Kc};
}

/// Create an access relation that is specific to
///        the matrix multiplication pattern.
///
/// Create an access relation of the following form:
/// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
/// where I is @p FirstDim, J is @p SecondDim.
///
/// It can be used, for example, to create relations that helps to consequently
/// access elements of operands of a matrix multiplication after creation of
/// the BLIS micro and macro kernels.
///
/// @see ScheduleTreeOptimizer::createMicroKernel
/// @see ScheduleTreeOptimizer::createMacroKernel
///
/// Subsequently, the described access relation is applied to the range of
/// @p MapOldIndVar, that is used to map original induction variables to
/// the ones, which are produced by schedule transformations.
It helps to 1030 /// define relations using a new space and, at the same time, keep them 1031 /// in the original one. 1032 /// 1033 /// @param MapOldIndVar The relation, which maps original induction variables 1034 /// to the ones, which are produced by schedule 1035 /// transformations. 1036 /// @param FirstDim, SecondDim The input dimensions that are used to define 1037 /// the specified access relation. 1038 /// @return The specified access relation. 1039 __isl_give isl_map *getMatMulAccRel(__isl_take isl_map *MapOldIndVar, 1040 unsigned FirstDim, unsigned SecondDim) { 1041 auto *Ctx = isl_map_get_ctx(MapOldIndVar); 1042 auto *AccessRelSpace = isl_space_alloc(Ctx, 0, 9, 3); 1043 auto *AccessRel = isl_map_universe(AccessRelSpace); 1044 AccessRel = isl_map_equate(AccessRel, isl_dim_in, FirstDim, isl_dim_out, 0); 1045 AccessRel = isl_map_equate(AccessRel, isl_dim_in, 5, isl_dim_out, 1); 1046 AccessRel = isl_map_equate(AccessRel, isl_dim_in, SecondDim, isl_dim_out, 2); 1047 return isl_map_apply_range(MapOldIndVar, AccessRel); 1048 } 1049 1050 __isl_give isl_schedule_node * 1051 createExtensionNode(__isl_take isl_schedule_node *Node, 1052 __isl_take isl_map *ExtensionMap) { 1053 auto *Extension = isl_union_map_from_map(ExtensionMap); 1054 auto *NewNode = isl_schedule_node_from_extension(Extension); 1055 return isl_schedule_node_graft_before(Node, NewNode); 1056 } 1057 1058 /// Apply the packing transformation. 1059 /// 1060 /// The packing transformation can be described as a data-layout 1061 /// transformation that requires to introduce a new array, copy data 1062 /// to the array, and change memory access locations to reference the array. 1063 /// It can be used to ensure that elements of the new array are read in-stride 1064 /// access, aligned to cache lines boundaries, and preloaded into certain cache 1065 /// levels. 1066 /// 1067 /// As an example let us consider the packing of the array A that would help 1068 /// to read its elements with in-stride access. 
/// An access to the array A
/// is represented by an access relation that has the form
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
/// k mod Kc, j mod Nr, i mod Mr].
///
/// To ensure that elements of the array A are read in-stride access, we add
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
/// Scop::createScopArrayInfo, change the access relation
/// S[i, j, k] -> A[i, k] to
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
/// the copy statement created by Scop::addScopStmt.
///
/// @param Node The schedule node to be optimized.
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
///                                 to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static __isl_give isl_schedule_node *optimizeDataLayoutMatrMulPattern(
    __isl_take isl_schedule_node *Node, __isl_take isl_map *MapOldIndVar,
    MicroKernelParamsTy MicroParams, MacroKernelParamsTy MacroParams,
    MatMulInfoTy &MMI) {
  // Recover the ScopStmt from the user pointer of the input tuple id.
  auto InputDimsId = isl_map_get_tuple_id(MapOldIndVar, isl_dim_in);
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(InputDimsId));
  isl_id_free(InputDimsId);

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // Walk up to the band in front of which the copy-in of B is grafted.
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  Node = isl_schedule_node_parent(isl_schedule_node_parent(Node));
  Node = isl_schedule_node_parent(Node);
  Node = isl_schedule_node_child(isl_schedule_node_band_split(Node, 2), 0);
  // Access relation for Packed_B[floor((j mod Nc)/Nr)][k mod Kc][j mod Nr],
  // built from output dimensions 3 and 7 of the transformed schedule.
  auto *AccRel = getMatMulAccRel(isl_map_copy(MapOldIndVar), 3, 7);
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = isl_map_set_tuple_id(AccRel, isl_dim_out, SAI->getBasePtrId());
  auto *OldAcc = MMI.B->getAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  // The extension map schedules one copy per iteration of the two outermost
  // transformed loops; the i dimension is fixed to 0 so B is copied once.
  auto *ExtMap =
      isl_map_project_out(isl_map_copy(MapOldIndVar), isl_dim_out, 2,
                          isl_map_dim(MapOldIndVar, isl_dim_out) - 2);
  ExtMap = isl_map_reverse(ExtMap);
  ExtMap = isl_map_fix_si(ExtMap, isl_dim_out, MMI.i, 0);
  auto *Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto *DomainId = isl_set_get_tuple_id(Domain);
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getAccessRelation(), isl_set_copy(Domain));
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, isl_id_copy(DomainId));
  ExtMap = isl_map_intersect_range(ExtMap, isl_set_copy(Domain));
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = isl_schedule_node_child(Node, 0);
  // Access relation for Packed_A[floor((i mod Mc)/Mr)][k mod Kc][i mod Mr],
  // built from output dimensions 4 and 6 of the transformed schedule.
  AccRel = getMatMulAccRel(isl_map_copy(MapOldIndVar), 4, 6);
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = isl_map_set_tuple_id(AccRel, isl_dim_out, SAI->getBasePtrId());
  OldAcc = MMI.A->getAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  // Schedule the copy of A per iteration of the three outermost transformed
  // loops; j is fixed to 0 so A is copied once per (outer) iteration.
  ExtMap = isl_map_project_out(MapOldIndVar, isl_dim_out, 3,
                               isl_map_dim(MapOldIndVar, isl_dim_out) - 3);
  ExtMap = isl_map_reverse(ExtMap);
  ExtMap = isl_map_fix_si(ExtMap, isl_dim_out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(OldAcc, MMI.A->getAccessRelation(),
                                           isl_set_copy(Domain));

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, DomainId);
  ExtMap = isl_map_intersect_range(ExtMap, Domain);
  ExtMap = isl_map_set_tuple_id(ExtMap, isl_dim_out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  Node = isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
  return isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0);
}

/// Get a relation mapping induction variables produced by schedule
/// transformations to the original ones.
///
/// @param Node The schedule node produced as the result of creation
///        of the BLIS kernels.
/// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
///                                             to be taken into account.
/// @return The relation mapping original induction variables to the ones
///         produced by schedule transformation.
1169 /// @see ScheduleTreeOptimizer::createMicroKernel 1170 /// @see ScheduleTreeOptimizer::createMacroKernel 1171 /// @see getMacroKernelParams 1172 __isl_give isl_map * 1173 getInductionVariablesSubstitution(__isl_take isl_schedule_node *Node, 1174 MicroKernelParamsTy MicroKernelParams, 1175 MacroKernelParamsTy MacroKernelParams) { 1176 auto *Child = isl_schedule_node_get_child(Node, 0); 1177 auto *UnMapOldIndVar = isl_schedule_node_get_prefix_schedule_union_map(Child); 1178 isl_schedule_node_free(Child); 1179 auto *MapOldIndVar = isl_map_from_union_map(UnMapOldIndVar); 1180 if (isl_map_dim(MapOldIndVar, isl_dim_out) > 9) 1181 MapOldIndVar = 1182 isl_map_project_out(MapOldIndVar, isl_dim_out, 0, 1183 isl_map_dim(MapOldIndVar, isl_dim_out) - 9); 1184 return MapOldIndVar; 1185 } 1186 1187 /// Isolate a set of partial tile prefixes and unroll the isolated part. 1188 /// 1189 /// The set should ensure that it contains only partial tile prefixes that have 1190 /// exactly Mr x Nr iterations of the two innermost loops produced by 1191 /// the optimization of the matrix multiplication. Mr and Nr are parameters of 1192 /// the micro-kernel. 1193 /// 1194 /// In case of parametric bounds, this helps to auto-vectorize the unrolled 1195 /// innermost loops, using the SLP vectorizer. 1196 /// 1197 /// @param Node The schedule node to be modified. 1198 /// @param MicroKernelParams Parameters of the micro-kernel 1199 /// to be taken into account. 1200 /// @return The modified isl_schedule_node. 
1201 static __isl_give isl_schedule_node * 1202 isolateAndUnrollMatMulInnerLoops(__isl_take isl_schedule_node *Node, 1203 struct MicroKernelParamsTy MicroKernelParams) { 1204 auto *Child = isl_schedule_node_get_child(Node, 0); 1205 auto *UnMapOldIndVar = isl_schedule_node_get_prefix_schedule_relation(Child); 1206 isl_schedule_node_free(Child); 1207 auto *Prefix = isl_map_range(isl_map_from_union_map(UnMapOldIndVar)); 1208 auto Dims = isl_set_dim(Prefix, isl_dim_set); 1209 Prefix = isl_set_project_out(Prefix, isl_dim_set, Dims - 1, 1); 1210 Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr); 1211 Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr); 1212 auto *IsolateOption = getIsolateOptions( 1213 isl_set_add_dims(isl_set_copy(Prefix), isl_dim_set, 3), 3); 1214 auto *Ctx = isl_schedule_node_get_ctx(Node); 1215 auto *AtomicOption = getAtomicOptions(Ctx); 1216 auto *Options = 1217 isl_union_set_union(IsolateOption, isl_union_set_copy(AtomicOption)); 1218 Options = isl_union_set_union(Options, getUnrollIsolatedSetOptions(Ctx)); 1219 Node = isl_schedule_node_band_set_ast_build_options(Node, Options); 1220 Node = isl_schedule_node_parent(isl_schedule_node_parent(Node)); 1221 IsolateOption = getIsolateOptions(Prefix, 3); 1222 Options = isl_union_set_union(IsolateOption, AtomicOption); 1223 Node = isl_schedule_node_band_set_ast_build_options(Node, Options); 1224 Node = isl_schedule_node_child(isl_schedule_node_child(Node, 0), 0); 1225 return Node; 1226 } 1227 1228 __isl_give isl_schedule_node *ScheduleTreeOptimizer::optimizeMatMulPattern( 1229 __isl_take isl_schedule_node *Node, const llvm::TargetTransformInfo *TTI, 1230 MatMulInfoTy &MMI) { 1231 assert(TTI && "The target transform info should be provided."); 1232 int DimOutNum = isl_schedule_node_band_n_member(Node); 1233 assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest " 1234 "and, consequently, the corresponding scheduling " 1235 "functions have at least three 
dimensions."); 1236 Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3); 1237 int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j; 1238 int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k; 1239 Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2); 1240 NewK = MMI.k == DimOutNum - 2 ? MMI.j : MMI.k; 1241 Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1); 1242 auto MicroKernelParams = getMicroKernelParams(TTI, MMI); 1243 auto MacroKernelParams = getMacroKernelParams(MicroKernelParams, MMI); 1244 Node = createMacroKernel(Node, MacroKernelParams); 1245 Node = createMicroKernel(Node, MicroKernelParams); 1246 if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 || 1247 MacroKernelParams.Kc == 1) 1248 return Node; 1249 auto *MapOldIndVar = getInductionVariablesSubstitution( 1250 Node, MicroKernelParams, MacroKernelParams); 1251 if (!MapOldIndVar) 1252 return Node; 1253 Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams); 1254 return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams, 1255 MacroKernelParams, MMI); 1256 } 1257 1258 bool ScheduleTreeOptimizer::isMatrMultPattern( 1259 __isl_keep isl_schedule_node *Node, const Dependences *D, 1260 MatMulInfoTy &MMI) { 1261 auto *PartialSchedule = 1262 isl_schedule_node_band_get_partial_schedule_union_map(Node); 1263 if (isl_schedule_node_band_n_member(Node) < 3 || 1264 isl_union_map_n_map(PartialSchedule) != 1) { 1265 isl_union_map_free(PartialSchedule); 1266 return false; 1267 } 1268 auto *NewPartialSchedule = isl_map_from_union_map(PartialSchedule); 1269 if (containsMatrMult(NewPartialSchedule, D, MMI)) { 1270 isl_map_free(NewPartialSchedule); 1271 return true; 1272 } 1273 isl_map_free(NewPartialSchedule); 1274 return false; 1275 } 1276 1277 __isl_give isl_schedule_node * 1278 ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node, 1279 void *User) { 1280 if (!isTileableBandNode(Node)) 1281 return Node; 1282 1283 const OptimizerAdditionalInfoTy *OAI 
= 1284 static_cast<const OptimizerAdditionalInfoTy *>(User); 1285 1286 MatMulInfoTy MMI; 1287 if (PMBasedOpts && User && isMatrMultPattern(Node, OAI->D, MMI)) { 1288 DEBUG(dbgs() << "The matrix multiplication pattern was detected\n"); 1289 return optimizeMatMulPattern(Node, OAI->TTI, MMI); 1290 } 1291 1292 return standardBandOpts(Node, User); 1293 } 1294 1295 __isl_give isl_schedule * 1296 ScheduleTreeOptimizer::optimizeSchedule(__isl_take isl_schedule *Schedule, 1297 const OptimizerAdditionalInfoTy *OAI) { 1298 isl_schedule_node *Root = isl_schedule_get_root(Schedule); 1299 Root = optimizeScheduleNode(Root, OAI); 1300 isl_schedule_free(Schedule); 1301 auto S = isl_schedule_node_get_schedule(Root); 1302 isl_schedule_node_free(Root); 1303 return S; 1304 } 1305 1306 __isl_give isl_schedule_node *ScheduleTreeOptimizer::optimizeScheduleNode( 1307 __isl_take isl_schedule_node *Node, const OptimizerAdditionalInfoTy *OAI) { 1308 Node = isl_schedule_node_map_descendant_bottom_up( 1309 Node, optimizeBand, const_cast<void *>(static_cast<const void *>(OAI))); 1310 return Node; 1311 } 1312 1313 bool ScheduleTreeOptimizer::isProfitableSchedule( 1314 Scop &S, __isl_keep isl_schedule *NewSchedule) { 1315 // To understand if the schedule has been optimized we check if the schedule 1316 // has changed at all. 1317 // TODO: We can improve this by tracking if any necessarily beneficial 1318 // transformations have been performed. This can e.g. be tiling, loop 1319 // interchange, or ...) 
We can track this either at the place where the 1320 // transformation has been performed or, in case of automatic ILP based 1321 // optimizations, by comparing (yet to be defined) performance metrics 1322 // before/after the scheduling optimizer 1323 // (e.g., #stride-one accesses) 1324 if (S.containsExtensionNode(NewSchedule)) 1325 return true; 1326 auto *NewScheduleMap = isl_schedule_get_map(NewSchedule); 1327 isl_union_map *OldSchedule = S.getSchedule(); 1328 assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes " 1329 "that make Scop::getSchedule() return nullptr."); 1330 bool changed = !isl_union_map_is_equal(OldSchedule, NewScheduleMap); 1331 isl_union_map_free(OldSchedule); 1332 isl_union_map_free(NewScheduleMap); 1333 return changed; 1334 } 1335 1336 namespace { 1337 class IslScheduleOptimizer : public ScopPass { 1338 public: 1339 static char ID; 1340 explicit IslScheduleOptimizer() : ScopPass(ID) { LastSchedule = nullptr; } 1341 1342 ~IslScheduleOptimizer() { isl_schedule_free(LastSchedule); } 1343 1344 /// Optimize the schedule of the SCoP @p S. 1345 bool runOnScop(Scop &S) override; 1346 1347 /// Print the new schedule for the SCoP @p S. 1348 void printScop(raw_ostream &OS, Scop &S) const override; 1349 1350 /// Register all analyses and transformation required. 1351 void getAnalysisUsage(AnalysisUsage &AU) const override; 1352 1353 /// Release the internal memory. 1354 void releaseMemory() override { 1355 isl_schedule_free(LastSchedule); 1356 LastSchedule = nullptr; 1357 } 1358 1359 private: 1360 isl_schedule *LastSchedule; 1361 }; 1362 } // namespace 1363 1364 char IslScheduleOptimizer::ID = 0; 1365 1366 bool IslScheduleOptimizer::runOnScop(Scop &S) { 1367 1368 // Skip empty SCoPs but still allow code generation as it will delete the 1369 // loops present but not needed. 
  // Nothing to schedule for a SCoP without statements; mark it optimized so
  // it is not revisited.
  if (S.getSize() == 0) {
    S.markAsOptimized();
    return false;
  }

  const Dependences &D =
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);

  // Without valid dependence information any new schedule could be illegal.
  if (!D.hasValidDependences())
    return false;

  // Drop the schedule from a previous run of this pass.
  isl_schedule_free(LastSchedule);
  LastSchedule = nullptr;

  // Build input data.
  //
  // Validity constraints must always cover all dependence kinds; proximity
  // (what the scheduler tries to shorten) is configurable via
  // -polly-opt-optimize-only.
  int ValidityKinds =
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  int ProximityKinds;

  if (OptimizeDeps == "all")
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  else if (OptimizeDeps == "raw")
    ProximityKinds = Dependences::TYPE_RAW;
  else {
    // Unknown option value: warn and behave as if "all" had been given.
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
           << " Falling back to optimizing all dependences.\n";
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  }

  isl_union_set *Domain = S.getDomains();

  if (!Domain)
    return false;

  isl_union_map *Validity = D.getDependences(ValidityKinds);
  isl_union_map *Proximity = D.getDependences(ProximityKinds);

  // Simplify the dependences by removing the constraints introduced by the
  // domains. This can speed up the scheduling time significantly, as large
  // constant coefficients will be removed from the dependences. The
  // introduction of some additional dependences reduces the possible
  // transformations, but in most cases, such transformations do not seem to
  // be interesting anyway. In some cases this option may prevent the
  // scheduler from finding any schedule.
  if (SimplifyDeps == "yes") {
    // Note: the gist operations consume their set argument, hence the
    // isl_union_set_copy calls keep Domain alive for later use.
    Validity = isl_union_map_gist_domain(Validity, isl_union_set_copy(Domain));
    Validity = isl_union_map_gist_range(Validity, isl_union_set_copy(Domain));
    Proximity =
        isl_union_map_gist_domain(Proximity, isl_union_set_copy(Domain));
    Proximity = isl_union_map_gist_range(Proximity, isl_union_set_copy(Domain));
  } else if (SimplifyDeps != "no") {
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
              "or 'no'. Falling back to default: 'yes'\n";
  }

  DEBUG(dbgs() << "\n\nCompute schedule from: ");
  DEBUG(dbgs() << "Domain := " << stringFromIslObj(Domain) << ";\n");
  DEBUG(dbgs() << "Proximity := " << stringFromIslObj(Proximity) << ";\n");
  DEBUG(dbgs() << "Validity := " << stringFromIslObj(Validity) << ";\n");

  // Translate the string-valued command-line options into the integer flags
  // the isl scheduler expects, warning (and using the default) on bad values.
  unsigned IslSerializeSCCs;

  if (FusionStrategy == "max") {
    IslSerializeSCCs = 0;
  } else if (FusionStrategy == "min") {
    IslSerializeSCCs = 1;
  } else {
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
              "fusion.\n";
    IslSerializeSCCs = 0;
  }

  int IslMaximizeBands;

  if (MaximizeBandDepth == "yes") {
    IslMaximizeBands = 1;
  } else if (MaximizeBandDepth == "no") {
    IslMaximizeBands = 0;
  } else {
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
              " or 'no'. Falling back to default: 'yes'\n";
    IslMaximizeBands = 1;
  }

  int IslOuterCoincidence;

  if (OuterCoincidence == "yes") {
    IslOuterCoincidence = 1;
  } else if (OuterCoincidence == "no") {
    IslOuterCoincidence = 0;
  } else {
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
              "'yes' or 'no'. Falling back to default: 'no'\n";
    IslOuterCoincidence = 0;
  }

  isl_ctx *Ctx = S.getIslCtx();

  // Configure the scheduler through context-wide isl options.
  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
  isl_options_set_tile_scale_tile_loops(Ctx, 0);

  // The scheduler may legitimately fail; temporarily tell isl to continue on
  // error instead of aborting, and restore the previous behavior afterwards.
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);

  // Assemble the scheduling problem. The constraint setters consume their
  // map arguments, so Validity is copied once before its second use as the
  // coincidence constraint.
  isl_schedule_constraints *ScheduleConstraints;
  ScheduleConstraints = isl_schedule_constraints_on_domain(Domain);
  ScheduleConstraints =
      isl_schedule_constraints_set_proximity(ScheduleConstraints, Proximity);
  ScheduleConstraints = isl_schedule_constraints_set_validity(
      ScheduleConstraints, isl_union_map_copy(Validity));
  ScheduleConstraints =
      isl_schedule_constraints_set_coincidence(ScheduleConstraints, Validity);
  isl_schedule *Schedule;
  Schedule = isl_schedule_constraints_compute_schedule(ScheduleConstraints);
  isl_options_set_on_error(Ctx, OnErrorStatus);

  // In cases the scheduler is not able to optimize the code, we just do not
  // touch the schedule.
1494 if (!Schedule) 1495 return false; 1496 1497 DEBUG({ 1498 auto *P = isl_printer_to_str(Ctx); 1499 P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK); 1500 P = isl_printer_print_schedule(P, Schedule); 1501 auto *str = isl_printer_get_str(P); 1502 dbgs() << "NewScheduleTree: \n" << str << "\n"; 1503 free(str); 1504 isl_printer_free(P); 1505 }); 1506 1507 Function &F = S.getFunction(); 1508 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1509 const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)}; 1510 isl_schedule *NewSchedule = 1511 ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI); 1512 1513 if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule)) { 1514 isl_schedule_free(NewSchedule); 1515 return false; 1516 } 1517 1518 S.setScheduleTree(NewSchedule); 1519 S.markAsOptimized(); 1520 1521 if (OptimizedScops) 1522 S.dump(); 1523 1524 return false; 1525 } 1526 1527 void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const { 1528 isl_printer *p; 1529 char *ScheduleStr; 1530 1531 OS << "Calculated schedule:\n"; 1532 1533 if (!LastSchedule) { 1534 OS << "n/a\n"; 1535 return; 1536 } 1537 1538 p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule)); 1539 p = isl_printer_print_schedule(p, LastSchedule); 1540 ScheduleStr = isl_printer_get_str(p); 1541 isl_printer_free(p); 1542 1543 OS << ScheduleStr << "\n"; 1544 } 1545 1546 void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const { 1547 ScopPass::getAnalysisUsage(AU); 1548 AU.addRequired<DependenceInfo>(); 1549 AU.addRequired<TargetTransformInfoWrapperPass>(); 1550 } 1551 1552 Pass *polly::createIslScheduleOptimizerPass() { 1553 return new IslScheduleOptimizer(); 1554 } 1555 1556 INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl", 1557 "Polly - Optimize schedule of SCoP", false, false); 1558 INITIALIZE_PASS_DEPENDENCY(DependenceInfo); 1559 INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass); 1560 
// Register the remaining pass dependency and complete the registration of
// the polly-opt-isl pass.
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass);
INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl",
                    "Polly - Optimize schedule of SCoP", false, false)