1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass generates an entirely new schedule tree from the data dependences 10 // and iteration domains. The new schedule tree is computed in two steps: 11 // 12 // 1) The isl scheduling optimizer is run 13 // 14 // The isl scheduling optimizer creates a new schedule tree that maximizes 15 // parallelism and tileability and minimizes data-dependence distances. The 16 // algorithm used is a modified version of the ``Pluto'' algorithm: 17 // 18 // U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan. 19 // A Practical Automatic Polyhedral Parallelizer and Locality Optimizer. 20 // In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language 21 // Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008. 22 // 23 // 2) A set of post-scheduling transformations is applied on the schedule tree. 24 // 25 // These optimizations include: 26 // 27 // - Tiling of the innermost tilable bands 28 // - Prevectorization - The choice of a possible outer loop that is strip-mined 29 // to the innermost level to enable inner-loop 30 // vectorization. 31 // - Some optimizations for spatial locality are also planned. 32 // 33 // For a detailed description of the schedule tree itself please see section 6 34 // of: 35 // 36 // Polyhedral AST generation is more than scanning polyhedra 37 // Tobias Grosser, Sven Verdoolaege, Albert Cohen 38 // ACM Transactions on Programming Languages and Systems (TOPLAS), 39 // 37(4), July 2015 40 // http://www.grosser.es/#pub-polyhedral-AST-generation 41 // 42 // This publication also contains a detailed discussion of the different options 43 // for polyhedral loop unrolling, full/partial tile separation and other uses 44 // of the schedule tree. 45 // 46 //===----------------------------------------------------------------------===// 47 48 #include "polly/ScheduleOptimizer.h" 49 #include "polly/CodeGen/CodeGeneration.h" 50 #include "polly/DependenceInfo.h" 51 #include "polly/LinkAllPasses.h" 52 #include "polly/ManualOptimizer.h" 53 #include "polly/Options.h" 54 #include "polly/ScheduleTreeTransform.h" 55 #include "polly/ScopInfo.h" 56 #include "polly/ScopPass.h" 57 #include "polly/Simplify.h" 58 #include "polly/Support/ISLOStream.h" 59 #include "llvm/ADT/Sequence.h" 60 #include "llvm/ADT/Statistic.h" 61 #include "llvm/Analysis/TargetTransformInfo.h" 62 #include "llvm/IR/Function.h" 63 #include "llvm/InitializePasses.h" 64 #include "llvm/Support/CommandLine.h" 65 #include "llvm/Support/Debug.h" 66 #include "llvm/Support/raw_ostream.h" 67 #include "isl/ctx.h" 68 #include "isl/options.h" 69 #include "isl/printer.h" 70 #include "isl/schedule.h" 71 #include "isl/schedule_node.h" 72 #include "isl/union_map.h" 73 #include "isl/union_set.h" 74 #include <algorithm> 75 #include <cassert> 76 #include <cmath> 77 #include <cstdint> 78 #include <cstdlib> 79 #include <string> 80 #include <vector> 81 82 using namespace llvm; 83 using namespace polly; 84 85 #define DEBUG_TYPE "polly-opt-isl" 86 87 static cl::opt<std::string> 88 OptimizeDeps("polly-opt-optimize-only", 89 cl::desc("Only a certain kind of dependences (all/raw)"), 90 cl::Hidden, cl::init("all"), cl::ZeroOrMore, 91 cl::cat(PollyCategory)); 92 93 static cl::opt<std::string> 94 SimplifyDeps("polly-opt-simplify-deps", 95 cl::desc("Dependences should be simplified (yes/no)"), 96 cl::Hidden, cl::init("yes"), cl::ZeroOrMore, 97 cl::cat(PollyCategory)); 98 99 static cl::opt<int> MaxConstantTerm( 100 "polly-opt-max-constant-term", 101 cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden, 102 cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory)); 103 104 static cl::opt<int> MaxCoefficient( 105 "polly-opt-max-coefficient", 106 cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden, 107 cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory)); 108 109 static cl::opt<std::string> FusionStrategy( 110 "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"), 111 cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory)); 112 113 static cl::opt<std::string> 114 MaximizeBandDepth("polly-opt-maximize-bands", 115 cl::desc("Maximize the band depth (yes/no)"), cl::Hidden, 116 cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory)); 117 118 static cl::opt<std::string> OuterCoincidence( 119 "polly-opt-outer-coincidence", 120 cl::desc("Try to construct schedules where the outer member of each band " 121 "satisfies the coincidence constraints (yes/no)"), 122 cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory)); 123 124 static cl::opt<int> PrevectorWidth( 125 "polly-prevect-width", 126 cl::desc( 127 "The number of loop iterations to strip-mine for pre-vectorization"), 128 cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory)); 129 130 static cl::opt<bool> FirstLevelTiling("polly-tiling", 131 cl::desc("Enable loop tiling"), 132 cl::init(true), cl::ZeroOrMore, 133 cl::cat(PollyCategory)); 134 135 static cl::opt<int> LatencyVectorFma( 136 "polly-target-latency-vector-fma", 137 cl::desc("The minimal number of cycles between issuing two " 138 "dependent consecutive vector fused multiply-add " 139 "instructions."), 140 cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory)); 141 142 static cl::opt<int> ThroughputVectorFma( 143 "polly-target-throughput-vector-fma", 144 cl::desc("A throughput of the processor floating-point arithmetic units " 145 "expressed in the number of vector fused multiply-add " 146 "instructions per clock cycle."), 147 cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory)); 148 149 // This option, along with --polly-target-2nd-cache-level-associativity, 150 // --polly-target-1st-cache-level-size, and --polly-target-2st-cache-level-size 151 // represent the parameters of the target cache, which do not have typical 152 // values that can be used by default. However, to apply the pattern matching 153 // optimizations, we use the values of the parameters of Intel Core i7-3820 154 // SandyBridge in case the parameters are not specified or not provided by the 155 // TargetTransformInfo. 156 static cl::opt<int> FirstCacheLevelAssociativity( 157 "polly-target-1st-cache-level-associativity", 158 cl::desc("The associativity of the first cache level."), cl::Hidden, 159 cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 160 161 static cl::opt<int> FirstCacheLevelDefaultAssociativity( 162 "polly-target-1st-cache-level-default-associativity", 163 cl::desc("The default associativity of the first cache level" 164 " (if not enough were provided by the TargetTransformInfo)."), 165 cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory)); 166 167 static cl::opt<int> SecondCacheLevelAssociativity( 168 "polly-target-2nd-cache-level-associativity", 169 cl::desc("The associativity of the second cache level."), cl::Hidden, 170 cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 171 172 static cl::opt<int> SecondCacheLevelDefaultAssociativity( 173 "polly-target-2nd-cache-level-default-associativity", 174 cl::desc("The default associativity of the second cache level" 175 " (if not enough were provided by the TargetTransformInfo)."), 176 cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory)); 177 178 static cl::opt<int> FirstCacheLevelSize( 179 "polly-target-1st-cache-level-size", 180 cl::desc("The size of the first cache level specified in bytes."), 181 cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 182 183 static cl::opt<int> FirstCacheLevelDefaultSize( 184 "polly-target-1st-cache-level-default-size", 185 cl::desc("The default size of the first cache level specified in bytes" 186 " (if not enough were provided by the TargetTransformInfo)."), 187 cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory)); 188 189 static cl::opt<int> SecondCacheLevelSize( 190 "polly-target-2nd-cache-level-size", 191 cl::desc("The size of the second level specified in bytes."), cl::Hidden, 192 cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 193 194 static cl::opt<int> SecondCacheLevelDefaultSize( 195 "polly-target-2nd-cache-level-default-size", 196 cl::desc("The default size of the second cache level specified in bytes" 197 " (if not enough were provided by the TargetTransformInfo)."), 198 cl::Hidden, cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory)); 199 200 static cl::opt<int> VectorRegisterBitwidth( 201 "polly-target-vector-register-bitwidth", 202 cl::desc("The size in bits of a vector register (if not set, this " 203 "information is taken from LLVM's target information."), 204 cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 205 206 static cl::opt<int> FirstLevelDefaultTileSize( 207 "polly-default-tile-size", 208 cl::desc("The default tile size (if not enough were provided by" 209 " --polly-tile-sizes)"), 210 cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory)); 211 212 static cl::list<int> 213 FirstLevelTileSizes("polly-tile-sizes", 214 cl::desc("A tile size for each loop dimension, filled " 215 "with --polly-default-tile-size"), 216 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, 217 cl::cat(PollyCategory)); 218 219 static cl::opt<bool> 220 SecondLevelTiling("polly-2nd-level-tiling", 221 cl::desc("Enable a 2nd level loop of loop tiling"), 222 cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); 223 224 static cl::opt<int> SecondLevelDefaultTileSize( 225 "polly-2nd-level-default-tile-size", 226 cl::desc("The default 2nd-level tile size (if not enough were provided by" 227 " --polly-2nd-level-tile-sizes)"), 228 cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory)); 229 230 static cl::list<int> 231 SecondLevelTileSizes("polly-2nd-level-tile-sizes", 232 cl::desc("A tile size for each loop dimension, filled " 233 "with --polly-default-tile-size"), 234 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, 235 cl::cat(PollyCategory)); 236 237 static cl::opt<bool> RegisterTiling("polly-register-tiling", 238 cl::desc("Enable register tiling"), 239 cl::init(false), cl::ZeroOrMore, 240 cl::cat(PollyCategory)); 241 242 static cl::opt<int> RegisterDefaultTileSize( 243 "polly-register-tiling-default-tile-size", 244 cl::desc("The default register tile size (if not enough were provided by" 245 " --polly-register-tile-sizes)"), 246 cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory)); 247 248 static cl::opt<int> PollyPatternMatchingNcQuotient( 249 "polly-pattern-matching-nc-quotient", 250 cl::desc("Quotient that is obtained by dividing Nc, the parameter of the" 251 "macro-kernel, by Nr, the parameter of the micro-kernel"), 252 cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory)); 253 254 static cl::list<int> 255 RegisterTileSizes("polly-register-tile-sizes", 256 cl::desc("A tile size for each loop dimension, filled " 257 "with --polly-register-tile-size"), 258 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, 259 cl::cat(PollyCategory)); 260 261 static cl::opt<bool> PragmaBasedOpts( 262 "polly-pragma-based-opts", 263 cl::desc("Apply user-directed transformation from metadata"), 264 cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory)); 265 266 static cl::opt<bool> 267 PMBasedOpts("polly-pattern-matching-based-opts", 268 cl::desc("Perform optimizations based on pattern matching"), 269 cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory)); 270 271 static cl::opt<bool> OptimizedScops( 272 "polly-optimized-scops", 273 cl::desc("Polly - Dump polyhedral description of Scops optimized with " 274 "the isl scheduling optimizer and the set of post-scheduling " 275 "transformations is applied on the schedule tree"), 276 cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); 277 278 STATISTIC(ScopsProcessed, "Number of scops processed"); 279 STATISTIC(ScopsRescheduled, "Number of scops rescheduled"); 280 STATISTIC(ScopsOptimized, "Number of scops optimized"); 281 282 STATISTIC(NumAffineLoopsOptimized, "Number of affine loops optimized"); 283 STATISTIC(NumBoxedLoopsOptimized, "Number of boxed loops optimized"); 284 285 #define THREE_STATISTICS(VARNAME, DESC) \ 286 static Statistic VARNAME[3] = { \ 287 {DEBUG_TYPE, #VARNAME "0", DESC " (original)"}, \ 288 {DEBUG_TYPE, #VARNAME "1", DESC " (after scheduler)"}, \ 289 {DEBUG_TYPE, #VARNAME "2", DESC " (after optimizer)"}} 290 291 THREE_STATISTICS(NumBands, "Number of bands"); 292 THREE_STATISTICS(NumBandMembers, "Number of band members"); 293 THREE_STATISTICS(NumCoincident, "Number of coincident band members"); 294 THREE_STATISTICS(NumPermutable, "Number of permutable bands"); 295 THREE_STATISTICS(NumFilters, "Number of filter nodes"); 296 THREE_STATISTICS(NumExtension, "Number of extension nodes"); 297 298 STATISTIC(FirstLevelTileOpts, "Number of first level tiling applied"); 299 STATISTIC(SecondLevelTileOpts, "Number of second level tiling applied"); 300 STATISTIC(RegisterTileOpts, "Number of register tiling applied"); 301 STATISTIC(PrevectOpts, "Number of strip-mining for prevectorization applied"); 302 STATISTIC(MatMulOpts, 303 "Number of matrix multiplication patterns detected and optimized"); 304 305 namespace { 306 /// Parameters of the micro kernel. 307 /// 308 /// Parameters, which determine sizes of rank-1 (i.e., outer product) update 309 /// used in the optimized matrix multiplication. 310 struct MicroKernelParamsTy { 311 int Mr; 312 int Nr; 313 }; 314 315 /// Parameters of the macro kernel. 316 /// 317 /// Parameters, which determine sizes of blocks of partitioned matrices 318 /// used in the optimized matrix multiplication. 319 struct MacroKernelParamsTy { 320 int Mc; 321 int Nc; 322 int Kc; 323 }; 324 325 /// Additional parameters of the schedule optimizer. 326 /// 327 /// Target Transform Info and the SCoP dependencies used by the schedule 328 /// optimizer. 329 struct OptimizerAdditionalInfoTy { 330 const llvm::TargetTransformInfo *TTI; 331 const Dependences *D; 332 }; 333 334 /// Parameters of the matrix multiplication operands. 335 /// 336 /// Parameters, which describe access relations that represent operands of the 337 /// matrix multiplication. 338 struct MatMulInfoTy { 339 MemoryAccess *A = nullptr; 340 MemoryAccess *B = nullptr; 341 MemoryAccess *ReadFromC = nullptr; 342 MemoryAccess *WriteToC = nullptr; 343 int i = -1; 344 int j = -1; 345 int k = -1; 346 }; 347 348 class ScheduleTreeOptimizer { 349 public: 350 /// Apply schedule tree transformations. 351 /// 352 /// This function takes an (possibly already optimized) schedule tree and 353 /// applies a set of additional optimizations on the schedule tree. The 354 /// transformations applied include: 355 /// 356 /// - Tiling 357 /// - Prevectorization 358 /// 359 /// @param Schedule The schedule object the transformations will be applied 360 /// to. 361 /// @param OAI Target Transform Info and the SCoP dependencies. 362 /// @returns The transformed schedule. 363 static isl::schedule 364 optimizeSchedule(isl::schedule Schedule, 365 const OptimizerAdditionalInfoTy *OAI = nullptr); 366 367 /// Apply schedule tree transformations. 368 /// 369 /// This function takes a node in an (possibly already optimized) schedule 370 /// tree and applies a set of additional optimizations on this schedule tree 371 /// node and its descendants. The transformations applied include: 372 /// 373 /// - Tiling 374 /// - Prevectorization 375 /// 376 /// @param Node The schedule object post-transformations will be applied to. 377 /// @param OAI Target Transform Info and the SCoP dependencies. 378 /// @returns The transformed schedule. 379 static isl::schedule_node 380 optimizeScheduleNode(isl::schedule_node Node, 381 const OptimizerAdditionalInfoTy *OAI = nullptr); 382 383 /// Decide if the @p NewSchedule is profitable for @p S. 384 /// 385 /// @param S The SCoP we optimize. 386 /// @param NewSchedule The new schedule we computed. 387 /// 388 /// @return True, if we believe @p NewSchedule is an improvement for @p S. 389 static bool isProfitableSchedule(polly::Scop &S, isl::schedule NewSchedule); 390 391 /// Isolate a set of partial tile prefixes. 392 /// 393 /// This set should ensure that it contains only partial tile prefixes that 394 /// have exactly VectorWidth iterations. 395 /// 396 /// @param Node A schedule node band, which is a parent of a band node, 397 /// that contains a vector loop. 398 /// @return Modified isl_schedule_node. 399 static isl::schedule_node isolateFullPartialTiles(isl::schedule_node Node, 400 int VectorWidth); 401 402 private: 403 /// Tile a schedule node. 404 /// 405 /// @param Node The node to tile. 406 /// @param Identifier An name that identifies this kind of tiling and 407 /// that is used to mark the tiled loops in the 408 /// generated AST. 409 /// @param TileSizes A vector of tile sizes that should be used for 410 /// tiling. 411 /// @param DefaultTileSize A default tile size that is used for dimensions 412 /// that are not covered by the TileSizes vector. 413 static isl::schedule_node tileNode(isl::schedule_node Node, 414 const char *Identifier, 415 llvm::ArrayRef<int> TileSizes, 416 int DefaultTileSize); 417 418 /// Tile a schedule node and unroll point loops. 419 /// 420 /// @param Node The node to register tile. 421 /// @param TileSizes A vector of tile sizes that should be used for 422 /// tiling. 423 /// @param DefaultTileSize A default tile size that is used for dimensions 424 static isl::schedule_node applyRegisterTiling(isl::schedule_node Node, 425 llvm::ArrayRef<int> TileSizes, 426 int DefaultTileSize); 427 428 /// Apply the BLIS matmul optimization pattern. 429 /// 430 /// Make the loops containing the matrix multiplication be the innermost 431 /// loops and apply the BLIS matmul optimization pattern. BLIS implements 432 /// gemm as three nested loops around a macro-kernel, plus two packing 433 /// routines. The macro-kernel is implemented in terms of two additional 434 /// loops around a micro-kernel. The micro-kernel is a loop around a rank-1 435 /// (i.e., outer product) update. 436 /// 437 /// For a detailed description please see [1]. 438 /// 439 /// The order of the loops defines the data reused in the BLIS implementation 440 /// of gemm ([1]). In particular, elements of the matrix B, the second 441 /// operand of matrix multiplication, are reused between iterations of the 442 /// innermost loop. To keep the reused data in cache, only elements of matrix 443 /// A, the first operand of matrix multiplication, should be evicted during 444 /// an iteration of the innermost loop. To provide such a cache replacement 445 /// policy, elements of the matrix A can, in particular, be loaded first and, 446 /// consequently, be least-recently-used. 447 /// 448 /// In our case matrices are stored in row-major order instead of 449 /// column-major order used in the BLIS implementation ([1]). It affects only 450 /// on the form of the BLIS micro kernel and the computation of its 451 /// parameters. In particular, reused elements of the matrix B are 452 /// successively multiplied by specific elements of the matrix A. 453 /// 454 /// Refs.: 455 /// [1] - Analytical Modeling is Enough for High Performance BLIS 456 /// Tze Meng Low, Francisco D Igual, Tyler M Smith, Enrique S Quintana-Orti 457 /// Technical Report, 2014 458 /// http://www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf 459 /// 460 /// @see ScheduleTreeOptimizer::createMicroKernel 461 /// @see ScheduleTreeOptimizer::createMacroKernel 462 /// @see getMicroKernelParams 463 /// @see getMacroKernelParams 464 /// 465 /// TODO: Implement the packing transformation. 466 /// 467 /// @param Node The node that contains a band to be optimized. The node 468 /// is required to successfully pass 469 /// ScheduleTreeOptimizer::isMatrMultPattern. 470 /// @param TTI Target Transform Info. 471 /// @param MMI Parameters of the matrix multiplication operands. 472 /// @returns The transformed schedule. 473 static isl::schedule_node 474 optimizeMatMulPattern(isl::schedule_node Node, 475 const llvm::TargetTransformInfo *TTI, 476 MatMulInfoTy &MMI); 477 478 /// Check if this node is a band node we want to tile. 479 /// 480 /// We look for innermost band nodes where individual dimensions are marked as 481 /// permutable. 482 /// 483 /// @param Node The node to check. 484 static bool isTileableBandNode(isl::schedule_node Node); 485 486 /// Pre-vectorizes one scheduling dimension of a schedule band. 487 /// 488 /// prevectSchedBand splits out the dimension DimToVectorize, tiles it and 489 /// sinks the resulting point loop. 490 /// 491 /// Example (DimToVectorize=0, VectorWidth=4): 492 /// 493 /// | Before transformation: 494 /// | 495 /// | A[i,j] -> [i,j] 496 /// | 497 /// | for (i = 0; i < 128; i++) 498 /// | for (j = 0; j < 128; j++) 499 /// | A(i,j); 500 /// 501 /// | After transformation: 502 /// | 503 /// | for (it = 0; it < 32; it+=1) 504 /// | for (j = 0; j < 128; j++) 505 /// | for (ip = 0; ip <= 3; ip++) 506 /// | A(4 * it + ip,j); 507 /// 508 /// The goal of this transformation is to create a trivially vectorizable 509 /// loop. This means a parallel loop at the innermost level that has a 510 /// constant number of iterations corresponding to the target vector width. 511 /// 512 /// This transformation creates a loop at the innermost level. The loop has 513 /// a constant number of iterations, if the number of loop iterations at 514 /// DimToVectorize can be divided by VectorWidth. The default VectorWidth is 515 /// currently constant and not yet target specific. This function does not 516 /// reason about parallelism. 517 static isl::schedule_node prevectSchedBand(isl::schedule_node Node, 518 unsigned DimToVectorize, 519 int VectorWidth); 520 521 /// Apply additional optimizations on the bands in the schedule tree. 522 /// 523 /// We are looking for an innermost band node and apply the following 524 /// transformations: 525 /// 526 /// - Tile the band 527 /// - if the band is tileable 528 /// - if the band has more than one loop dimension 529 /// 530 /// - Prevectorize the schedule of the band (or the point loop in case of 531 /// tiling). 532 /// - if vectorization is enabled 533 /// 534 /// @param Node The schedule node to (possibly) optimize. 535 /// @param User A pointer to forward some use information 536 /// (currently unused). 537 static isl_schedule_node *optimizeBand(isl_schedule_node *Node, void *User); 538 539 /// Apply additional optimizations on the bands in the schedule tree. 540 /// 541 /// We apply the following 542 /// transformations: 543 /// 544 /// - Tile the band 545 /// - Prevectorize the schedule of the band (or the point loop in case of 546 /// tiling). 547 /// - if vectorization is enabled 548 /// 549 /// @param Node The schedule node to (possibly) optimize. 550 /// @param User A pointer to forward some use information 551 /// (currently unused). 552 static isl::schedule_node standardBandOpts(isl::schedule_node Node, 553 void *User); 554 555 /// Check if this node contains a partial schedule that could 556 /// probably be optimized with analytical modeling. 557 /// 558 /// isMatrMultPattern tries to determine whether the following conditions 559 /// are true: 560 /// 1. the partial schedule contains only one statement. 561 /// 2. there are exactly three input dimensions. 562 /// 3. all memory accesses of the statement will have stride 0 or 1, if we 563 /// interchange loops (switch the variable used in the inner loop to 564 /// the outer loop). 565 /// 4. all memory accesses of the statement except from the last one, are 566 /// read memory access and the last one is write memory access. 567 /// 5. all subscripts of the last memory access of the statement don't 568 /// contain the variable used in the inner loop. 569 /// If this is the case, we could try to use an approach that is similar to 570 /// the one used to get close-to-peak performance of matrix multiplications. 571 /// 572 /// @param Node The node to check. 573 /// @param D The SCoP dependencies. 574 /// @param MMI Parameters of the matrix multiplication operands. 575 static bool isMatrMultPattern(isl::schedule_node Node, 576 const polly::Dependences *D, MatMulInfoTy &MMI); 577 578 /// Create the BLIS macro-kernel. 579 /// 580 /// We create the BLIS macro-kernel by applying a combination of tiling 581 /// of dimensions of the band node and interchanging of two innermost 582 /// modified dimensions. The values of of MacroKernelParams's fields are used 583 /// as tile sizes. 584 /// 585 /// @param Node The schedule node to be modified. 586 /// @param MacroKernelParams Parameters of the macro kernel 587 /// to be used as tile sizes. 588 static isl::schedule_node 589 createMacroKernel(isl::schedule_node Node, 590 MacroKernelParamsTy MacroKernelParams); 591 592 /// Create the BLIS macro-kernel. 593 /// 594 /// We create the BLIS macro-kernel by applying a combination of tiling 595 /// of dimensions of the band node and interchanging of two innermost 596 /// modified dimensions. The values passed in MicroKernelParam are used 597 /// as tile sizes. 598 /// 599 /// @param Node The schedule node to be modified. 600 /// @param MicroKernelParams Parameters of the micro kernel 601 /// to be used as tile sizes. 602 /// @see MicroKernelParamsTy 603 static isl::schedule_node 604 createMicroKernel(isl::schedule_node Node, 605 MicroKernelParamsTy MicroKernelParams); 606 }; 607 608 /// Create an isl::union_set, which describes the isolate option based on 609 /// IsolateDomain. 610 /// 611 /// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should 612 /// belong to the current band node. 613 /// @param OutDimsNum A number of dimensions that should belong to 614 /// the current band node. 615 static isl::union_set getIsolateOptions(isl::set IsolateDomain, 616 isl_size OutDimsNum) { 617 isl_size Dims = IsolateDomain.dim(isl::dim::set); 618 assert(OutDimsNum <= Dims && 619 "The isl::set IsolateDomain is used to describe the range of schedule " 620 "dimensions values, which should be isolated. Consequently, the " 621 "number of its dimensions should be greater than or equal to the " 622 "number of the schedule dimensions."); 623 isl::map IsolateRelation = isl::map::from_domain(IsolateDomain); 624 IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in, 625 Dims - OutDimsNum, OutDimsNum); 626 isl::set IsolateOption = IsolateRelation.wrap(); 627 isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr); 628 IsolateOption = IsolateOption.set_tuple_id(Id); 629 return isl::union_set(IsolateOption); 630 } 631 632 /// Create an isl::union_set, which describes the specified option for the 633 /// dimension of the current node. 634 /// 635 /// @param Ctx An isl::ctx, which is used to create the isl::union_set. 636 /// @param Option The name of the option. 637 isl::union_set getDimOptions(isl::ctx Ctx, const char *Option) { 638 isl::space Space(Ctx, 0, 1); 639 auto DimOption = isl::set::universe(Space); 640 auto Id = isl::id::alloc(Ctx, Option, nullptr); 641 DimOption = DimOption.set_tuple_id(Id); 642 return isl::union_set(DimOption); 643 } 644 645 /// Create an isl::union_set, which describes the option of the form 646 /// [isolate[] -> unroll[x]]. 647 /// 648 /// @param Ctx An isl::ctx, which is used to create the isl::union_set. 649 static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) { 650 isl::space Space = isl::space(Ctx, 0, 0, 1); 651 isl::map UnrollIsolatedSetOption = isl::map::universe(Space); 652 isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr); 653 isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr); 654 UnrollIsolatedSetOption = 655 UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId); 656 UnrollIsolatedSetOption = 657 UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId); 658 return UnrollIsolatedSetOption.wrap(); 659 } 660 661 /// Make the last dimension of Set to take values from 0 to VectorWidth - 1. 662 /// 663 /// @param Set A set, which should be modified. 664 /// @param VectorWidth A parameter, which determines the constraint. 665 static isl::set addExtentConstraints(isl::set Set, int VectorWidth) { 666 unsigned Dims = Set.dim(isl::dim::set); 667 isl::space Space = Set.get_space(); 668 isl::local_space LocalSpace = isl::local_space(Space); 669 isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace); 670 ExtConstr = ExtConstr.set_constant_si(0); 671 ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1); 672 Set = Set.add_constraint(ExtConstr); 673 ExtConstr = isl::constraint::alloc_inequality(LocalSpace); 674 ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1); 675 ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1); 676 return Set.add_constraint(ExtConstr); 677 } 678 } // namespace 679 680 isl::set polly::getPartialTilePrefixes(isl::set ScheduleRange, 681 int VectorWidth) { 682 isl_size Dims = ScheduleRange.dim(isl::dim::set); 683 isl::set LoopPrefixes = 684 ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1); 685 auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth); 686 isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange); 687 BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1); 688 LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1); 689 return LoopPrefixes.subtract(BadPrefixes); 690 } 691 692 namespace { 693 isl::schedule_node 694 ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node, 695 int VectorWidth) { 696 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band); 697 Node = Node.child(0).child(0); 698 isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation(); 699 isl::union_set ScheduleRangeUSet = SchedRelUMap.range(); 700 isl::set ScheduleRange{ScheduleRangeUSet}; 701 isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth); 702 auto AtomicOption = getDimOptions(IsolateDomain.get_ctx(), "atomic"); 703 isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1); 704 Node = Node.parent().parent(); 705 isl::union_set Options = IsolateOption.unite(AtomicOption); 706 Node = Node.band_set_ast_build_options(Options); 707 return Node; 708 } 709 710 isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand( 711 isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) { 712 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band); 713 714 auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get())); 715 isl_size ScheduleDimensions = Space.dim(isl::dim::set); 716 assert((isl_size)DimToVectorize < ScheduleDimensions); 717 718 if (DimToVectorize > 0) { 719 Node = isl::manage( 720 isl_schedule_node_band_split(Node.release(), DimToVectorize)); 721 Node = Node.child(0); 722 } 723 if ((isl_size)DimToVectorize < ScheduleDimensions - 1) 724 Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1)); 725 Space = isl::manage(isl_schedule_node_band_get_space(Node.get())); 726 auto Sizes = isl::multi_val::zero(Space); 727 Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth)); 728 Node = 729 isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release())); 730 Node = isolateFullPartialTiles(Node, VectorWidth); 731 Node = Node.child(0); 732 // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise, 733 // we will have troubles to match it in the backend. 734 Node = Node.band_set_ast_build_options( 735 isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }")); 736 Node = isl::manage(isl_schedule_node_band_sink(Node.release())); 737 Node = Node.child(0); 738 if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf) 739 Node = Node.parent(); 740 auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr); 741 PrevectOpts++; 742 return Node.insert_mark(LoopMarker); 743 } 744 745 isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node, 746 const char *Identifier, 747 ArrayRef<int> TileSizes, 748 int DefaultTileSize) { 749 auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get())); 750 auto Dims = Space.dim(isl::dim::set); 751 auto Sizes = isl::multi_val::zero(Space); 752 std::string IdentifierString(Identifier); 753 for (auto i : seq<isl_size>(0, Dims)) { 754 auto tileSize = 755 i < (isl_size)TileSizes.size() ? TileSizes[i] : DefaultTileSize; 756 Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize)); 757 } 758 auto TileLoopMarkerStr = IdentifierString + " - Tiles"; 759 auto TileLoopMarker = 760 isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr, nullptr); 761 Node = Node.insert_mark(TileLoopMarker); 762 Node = Node.child(0); 763 Node = 764 isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release())); 765 Node = Node.child(0); 766 auto PointLoopMarkerStr = IdentifierString + " - Points"; 767 auto PointLoopMarker = 768 isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr, nullptr); 769 Node = Node.insert_mark(PointLoopMarker); 770 return Node.child(0); 771 } 772 773 isl::schedule_node ScheduleTreeOptimizer::applyRegisterTiling( 774 isl::schedule_node Node, ArrayRef<int> TileSizes, int DefaultTileSize) { 775 Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize); 776 auto Ctx = Node.get_ctx(); 777 return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}")); 778 } 779 780 static bool isSimpleInnermostBand(const isl::schedule_node &Node) { 781 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band); 782 assert(isl_schedule_node_n_children(Node.get()) == 1); 783 784 auto ChildType = isl_schedule_node_get_type(Node.child(0).get()); 785 786 if (ChildType == isl_schedule_node_leaf) 787 return true; 788 789 if (ChildType != isl_schedule_node_sequence) 790 return false; 791 792 auto Sequence = Node.child(0); 793 794 for (int c = 0, nc = isl_schedule_node_n_children(Sequence.get()); c < nc; 795 ++c) { 796 auto Child = Sequence.child(c); 797 if (isl_schedule_node_get_type(Child.get()) != isl_schedule_node_filter) 798 return false; 799 if (isl_schedule_node_get_type(Child.child(0).get()) != 800 isl_schedule_node_leaf) 801 return false; 802 } 803 return true; 804 } 805 806 bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) { 807 if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band) 808 return false; 809 810 if (isl_schedule_node_n_children(Node.get()) != 1) 811 return false; 812 813 if (!isl_schedule_node_band_get_permutable(Node.get())) 814 return false; 815 816 auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get())); 817 auto Dims = Space.dim(isl::dim::set); 818 819 if (Dims <= 1) 820 return false; 821 822 return isSimpleInnermostBand(Node); 823 } 824 825 __isl_give isl::schedule_node 826 ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) { 827 if (FirstLevelTiling) { 828 Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes, 829 FirstLevelDefaultTileSize); 830 FirstLevelTileOpts++; 831 } 832 833 if (SecondLevelTiling) { 834 Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes, 835 SecondLevelDefaultTileSize); 836 SecondLevelTileOpts++; 837 } 838 839 if (RegisterTiling) { 840 Node = 841 applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize); 842 RegisterTileOpts++; 843 } 844 845 if (PollyVectorizerChoice == VECTORIZER_NONE) 846 return Node; 847 848 auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get())); 849 auto Dims = Space.dim(isl::dim::set); 850 851 for (int i = Dims - 1; i >= 0; i--) 852 if (Node.band_member_get_coincident(i)) { 853 Node = prevectSchedBand(Node, i, PrevectorWidth); 854 break; 855 } 856 857 return Node; 858 } 859 860 /// Permute the two dimensions of the isl map. 861 /// 862 /// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that 863 /// have type @p DimType. 864 /// 865 /// @param Map The isl map to be modified. 866 /// @param DimType The type of the dimensions. 867 /// @param DstPos The first dimension. 868 /// @param SrcPos The second dimension. 869 /// @return The modified map. 870 isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos, 871 unsigned SrcPos) { 872 assert((isl_size)DstPos < Map.dim(DimType) && 873 (isl_size)SrcPos < Map.dim(DimType)); 874 if (DstPos == SrcPos) 875 return Map; 876 isl::id DimId; 877 if (Map.has_tuple_id(DimType)) 878 DimId = Map.get_tuple_id(DimType); 879 auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in; 880 isl::id FreeDimId; 881 if (Map.has_tuple_id(FreeDim)) 882 FreeDimId = Map.get_tuple_id(FreeDim); 883 auto MaxDim = std::max(DstPos, SrcPos); 884 auto MinDim = std::min(DstPos, SrcPos); 885 Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1); 886 Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1); 887 Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1); 888 Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1); 889 if (DimId) 890 Map = Map.set_tuple_id(DimType, DimId); 891 if (FreeDimId) 892 Map = Map.set_tuple_id(FreeDim, FreeDimId); 893 return Map; 894 } 895 896 /// Check the form of the access relation. 897 /// 898 /// Check that the access relation @p AccMap has the form M[i][j], where i 899 /// is a @p FirstPos and j is a @p SecondPos. 900 /// 901 /// @param AccMap The access relation to be checked. 902 /// @param FirstPos The index of the input dimension that is mapped to 903 /// the first output dimension. 904 /// @param SecondPos The index of the input dimension that is mapped to the 905 /// second output dimension. 906 /// @return True in case @p AccMap has the expected form and false, 907 /// otherwise. 908 static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos, 909 int &SecondPos) { 910 isl::space Space = AccMap.get_space(); 911 isl::map Universe = isl::map::universe(Space); 912 913 if (Space.dim(isl::dim::out) != 2) 914 return false; 915 916 // MatMul has the form: 917 // for (i = 0; i < N; i++) 918 // for (j = 0; j < M; j++) 919 // for (k = 0; k < P; k++) 920 // C[i, j] += A[i, k] * B[k, j] 921 // 922 // Permutation of three outer loops: 3! = 6 possibilities. 923 int FirstDims[] = {0, 0, 1, 1, 2, 2}; 924 int SecondDims[] = {1, 2, 2, 0, 0, 1}; 925 for (int i = 0; i < 6; i += 1) { 926 auto PossibleMatMul = 927 Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0) 928 .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1); 929 930 AccMap = AccMap.intersect_domain(Domain); 931 PossibleMatMul = PossibleMatMul.intersect_domain(Domain); 932 933 // If AccMap spans entire domain (Non-partial write), 934 // compute FirstPos and SecondPos. 935 // If AccMap != PossibleMatMul here (the two maps have been gisted at 936 // this point), it means that the writes are not complete, or in other 937 // words, it is a Partial write and Partial writes must be rejected. 938 if (AccMap.is_equal(PossibleMatMul)) { 939 if (FirstPos != -1 && FirstPos != FirstDims[i]) 940 continue; 941 FirstPos = FirstDims[i]; 942 if (SecondPos != -1 && SecondPos != SecondDims[i]) 943 continue; 944 SecondPos = SecondDims[i]; 945 return true; 946 } 947 } 948 949 return false; 950 } 951 952 /// Does the memory access represent a non-scalar operand of the matrix 953 /// multiplication. 954 /// 955 /// Check that the memory access @p MemAccess is the read access to a non-scalar 956 /// operand of the matrix multiplication or its result. 957 /// 958 /// @param MemAccess The memory access to be checked. 959 /// @param MMI Parameters of the matrix multiplication operands. 960 /// @return True in case the memory access represents the read access 961 /// to a non-scalar operand of the matrix multiplication and 962 /// false, otherwise. 963 static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess, 964 MatMulInfoTy &MMI) { 965 if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead()) 966 return false; 967 auto AccMap = MemAccess->getLatestAccessRelation(); 968 isl::set StmtDomain = MemAccess->getStatement()->getDomain(); 969 if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) { 970 MMI.ReadFromC = MemAccess; 971 return true; 972 } 973 if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) { 974 MMI.A = MemAccess; 975 return true; 976 } 977 if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) { 978 MMI.B = MemAccess; 979 return true; 980 } 981 return false; 982 } 983 984 /// Check accesses to operands of the matrix multiplication. 985 /// 986 /// Check that accesses of the SCoP statement, which corresponds to 987 /// the partial schedule @p PartialSchedule, are scalar in terms of loops 988 /// containing the matrix multiplication, in case they do not represent 989 /// accesses to the non-scalar operands of the matrix multiplication or 990 /// its result. 991 /// 992 /// @param PartialSchedule The partial schedule of the SCoP statement. 993 /// @param MMI Parameters of the matrix multiplication operands. 994 /// @return True in case the corresponding SCoP statement 995 /// represents matrix multiplication and false, 996 /// otherwise. 997 static bool containsOnlyMatrMultAcc(isl::map PartialSchedule, 998 MatMulInfoTy &MMI) { 999 auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in); 1000 auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user()); 1001 isl_size OutDimNum = PartialSchedule.dim(isl::dim::out); 1002 assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest " 1003 "and, consequently, the corresponding scheduling " 1004 "functions have at least three dimensions."); 1005 auto MapI = 1006 permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1); 1007 auto MapJ = 1008 permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1); 1009 auto MapK = 1010 permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1); 1011 1012 auto Accesses = getAccessesInOrder(*Stmt); 1013 for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) { 1014 auto *MemAccessPtr = *MemA; 1015 if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC && 1016 !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) && 1017 !(MemAccessPtr->isStrideZero(MapI)) && 1018 MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK)) 1019 return false; 1020 } 1021 return true; 1022 } 1023 1024 /// Check for dependencies corresponding to the matrix multiplication. 1025 /// 1026 /// Check that there is only true dependence of the form 1027 /// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement 1028 /// represented by @p Schedule and k is @p Pos. Such a dependence corresponds 1029 /// to the dependency produced by the matrix multiplication. 1030 /// 1031 /// @param Schedule The schedule of the SCoP statement. 1032 /// @param D The SCoP dependencies. 1033 /// @param Pos The parameter to describe an acceptable true dependence. 1034 /// In case it has a negative value, try to determine its 1035 /// acceptable value. 1036 /// @return True in case dependencies correspond to the matrix multiplication 1037 /// and false, otherwise. 1038 static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D, 1039 int &Pos) { 1040 isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW); 1041 isl::union_map Red = D->getDependences(Dependences::TYPE_RED); 1042 if (Red) 1043 Dep = Dep.unite(Red); 1044 auto DomainSpace = Schedule.get_space().domain(); 1045 auto Space = DomainSpace.map_from_domain_and_range(DomainSpace); 1046 auto Deltas = Dep.extract_map(Space).deltas(); 1047 isl_size DeltasDimNum = Deltas.dim(isl::dim::set); 1048 for (int i = 0; i < DeltasDimNum; i++) { 1049 auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i); 1050 Pos = Pos < 0 && Val.is_one() ? i : Pos; 1051 if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one()))) 1052 return false; 1053 } 1054 if (DeltasDimNum == 0 || Pos < 0) 1055 return false; 1056 return true; 1057 } 1058 1059 /// Check if the SCoP statement could probably be optimized with analytical 1060 /// modeling. 1061 /// 1062 /// containsMatrMult tries to determine whether the following conditions 1063 /// are true: 1064 /// 1. The last memory access modeling an array, MA1, represents writing to 1065 /// memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or 1066 /// S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement 1067 /// under consideration. 1068 /// 2. There is only one loop-carried true dependency, and it has the 1069 /// form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no 1070 /// loop-carried or anti dependencies. 1071 /// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent 1072 /// reading from memory and have the form S(..., i3, ...) -> M(i1, i3), 1073 /// S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively, 1074 /// and all memory accesses of the SCoP that are different from MA1, MA2, 1075 /// MA3, and MA4 have stride 0, if the innermost loop is exchanged with any 1076 /// of loops i1, i2 and i3. 1077 /// 1078 /// @param PartialSchedule The PartialSchedule that contains a SCoP statement 1079 /// to check. 1080 /// @D The SCoP dependencies. 1081 /// @MMI Parameters of the matrix multiplication operands. 1082 static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D, 1083 MatMulInfoTy &MMI) { 1084 auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in); 1085 auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user()); 1086 if (Stmt->size() <= 1) 1087 return false; 1088 1089 auto Accesses = getAccessesInOrder(*Stmt); 1090 for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) { 1091 auto *MemAccessPtr = *MemA; 1092 if (!MemAccessPtr->isLatestArrayKind()) 1093 continue; 1094 if (!MemAccessPtr->isWrite()) 1095 return false; 1096 auto AccMap = MemAccessPtr->getLatestAccessRelation(); 1097 if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j)) 1098 return false; 1099 MMI.WriteToC = MemAccessPtr; 1100 break; 1101 } 1102 1103 if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k)) 1104 return false; 1105 1106 if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI)) 1107 return false; 1108 1109 if (!MMI.A || !MMI.B || !MMI.ReadFromC) 1110 return false; 1111 return true; 1112 } 1113 1114 /// Permute two dimensions of the band node. 1115 /// 1116 /// Permute FirstDim and SecondDim dimensions of the Node. 1117 /// 1118 /// @param Node The band node to be modified. 1119 /// @param FirstDim The first dimension to be permuted. 1120 /// @param SecondDim The second dimension to be permuted. 1121 static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node, 1122 unsigned FirstDim, 1123 unsigned SecondDim) { 1124 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band && 1125 (unsigned)isl_schedule_node_band_n_member(Node.get()) > 1126 std::max(FirstDim, SecondDim)); 1127 auto PartialSchedule = 1128 isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get())); 1129 auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim); 1130 auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim); 1131 PartialSchedule = 1132 PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim); 1133 PartialSchedule = 1134 PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim); 1135 Node = isl::manage(isl_schedule_node_delete(Node.release())); 1136 return Node.insert_partial_schedule(PartialSchedule); 1137 } 1138 1139 isl::schedule_node ScheduleTreeOptimizer::createMicroKernel( 1140 isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) { 1141 Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr}, 1142 1); 1143 Node = Node.parent().parent(); 1144 return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0); 1145 } 1146 1147 isl::schedule_node ScheduleTreeOptimizer::createMacroKernel( 1148 isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) { 1149 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band); 1150 if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 && 1151 MacroKernelParams.Kc == 1) 1152 return Node; 1153 int DimOutNum = isl_schedule_node_band_n_member(Node.get()); 1154 std::vector<int> TileSizes(DimOutNum, 1); 1155 TileSizes[DimOutNum - 3] = MacroKernelParams.Mc; 1156 TileSizes[DimOutNum - 2] = MacroKernelParams.Nc; 1157 TileSizes[DimOutNum - 1] = MacroKernelParams.Kc; 1158 Node = tileNode(Node, "1st level tiling", TileSizes, 1); 1159 Node = Node.parent().parent(); 1160 Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1); 1161 Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1); 1162 1163 // Mark the outermost loop as parallelizable. 1164 Node = Node.band_member_set_coincident(0, true); 1165 1166 return Node.child(0).child(0); 1167 } 1168 1169 /// Get the size of the widest type of the matrix multiplication operands 1170 /// in bytes, including alignment padding. 1171 /// 1172 /// @param MMI Parameters of the matrix multiplication operands. 1173 /// @return The size of the widest type of the matrix multiplication operands 1174 /// in bytes, including alignment padding. 1175 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) { 1176 auto *S = MMI.A->getStatement()->getParent(); 1177 auto &DL = S->getFunction().getParent()->getDataLayout(); 1178 auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType()); 1179 auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType()); 1180 auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType()); 1181 return std::max({ElementSizeA, ElementSizeB, ElementSizeC}); 1182 } 1183 1184 /// Get the size of the widest type of the matrix multiplication operands 1185 /// in bits. 1186 /// 1187 /// @param MMI Parameters of the matrix multiplication operands. 1188 /// @return The size of the widest type of the matrix multiplication operands 1189 /// in bits. 1190 static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) { 1191 auto *S = MMI.A->getStatement()->getParent(); 1192 auto &DL = S->getFunction().getParent()->getDataLayout(); 1193 auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType()); 1194 auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType()); 1195 auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType()); 1196 return std::max({ElementSizeA, ElementSizeB, ElementSizeC}); 1197 } 1198 1199 /// Get parameters of the BLIS micro kernel. 1200 /// 1201 /// We choose the Mr and Nr parameters of the micro kernel to be large enough 1202 /// such that no stalls caused by the combination of latencies and dependencies 1203 /// are introduced during the updates of the resulting matrix of the matrix 1204 /// multiplication. However, they should also be as small as possible to 1205 /// release more registers for entries of multiplied matrices. 1206 /// 1207 /// @param TTI Target Transform Info. 1208 /// @param MMI Parameters of the matrix multiplication operands. 1209 /// @return The structure of type MicroKernelParamsTy. 1210 /// @see MicroKernelParamsTy 1211 static struct MicroKernelParamsTy 1212 getMicroKernelParams(const TargetTransformInfo *TTI, MatMulInfoTy MMI) { 1213 assert(TTI && "The target transform info should be provided."); 1214 1215 // Nvec - Number of double-precision floating-point numbers that can be hold 1216 // by a vector register. Use 2 by default. 1217 long RegisterBitwidth = VectorRegisterBitwidth; 1218 1219 if (RegisterBitwidth == -1) 1220 RegisterBitwidth = 1221 TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector); 1222 auto ElementSize = getMatMulTypeSize(MMI); 1223 assert(ElementSize > 0 && "The element size of the matrix multiplication " 1224 "operands should be greater than zero."); 1225 auto Nvec = RegisterBitwidth / ElementSize; 1226 if (Nvec == 0) 1227 Nvec = 2; 1228 int Nr = ceil(sqrt((double)(Nvec * LatencyVectorFma * ThroughputVectorFma)) / 1229 Nvec) * 1230 Nvec; 1231 int Mr = ceil((double)(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr)); 1232 return {Mr, Nr}; 1233 } 1234 1235 /// Determine parameters of the target cache. 1236 /// 1237 /// @param TTI Target Transform Info. 1238 void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) { 1239 auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D; 1240 auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D; 1241 if (FirstCacheLevelSize == -1) { 1242 if (TTI->getCacheSize(L1DCache).hasValue()) 1243 FirstCacheLevelSize = TTI->getCacheSize(L1DCache).getValue(); 1244 else 1245 FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize); 1246 } 1247 if (SecondCacheLevelSize == -1) { 1248 if (TTI->getCacheSize(L2DCache).hasValue()) 1249 SecondCacheLevelSize = TTI->getCacheSize(L2DCache).getValue(); 1250 else 1251 SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize); 1252 } 1253 if (FirstCacheLevelAssociativity == -1) { 1254 if (TTI->getCacheAssociativity(L1DCache).hasValue()) 1255 FirstCacheLevelAssociativity = 1256 TTI->getCacheAssociativity(L1DCache).getValue(); 1257 else 1258 FirstCacheLevelAssociativity = 1259 static_cast<int>(FirstCacheLevelDefaultAssociativity); 1260 } 1261 if (SecondCacheLevelAssociativity == -1) { 1262 if (TTI->getCacheAssociativity(L2DCache).hasValue()) 1263 SecondCacheLevelAssociativity = 1264 TTI->getCacheAssociativity(L2DCache).getValue(); 1265 else 1266 SecondCacheLevelAssociativity = 1267 static_cast<int>(SecondCacheLevelDefaultAssociativity); 1268 } 1269 } 1270 1271 /// Get parameters of the BLIS macro kernel. 1272 /// 1273 /// During the computation of matrix multiplication, blocks of partitioned 1274 /// matrices are mapped to different layers of the memory hierarchy. 1275 /// To optimize data reuse, blocks should be ideally kept in cache between 1276 /// iterations. Since parameters of the macro kernel determine sizes of these 1277 /// blocks, there are upper and lower bounds on these parameters. 1278 /// 1279 /// @param TTI Target Transform Info. 1280 /// @param MicroKernelParams Parameters of the micro-kernel 1281 /// to be taken into account. 1282 /// @param MMI Parameters of the matrix multiplication operands. 1283 /// @return The structure of type MacroKernelParamsTy. 1284 /// @see MacroKernelParamsTy 1285 /// @see MicroKernelParamsTy 1286 static struct MacroKernelParamsTy 1287 getMacroKernelParams(const llvm::TargetTransformInfo *TTI, 1288 const MicroKernelParamsTy &MicroKernelParams, 1289 MatMulInfoTy MMI) { 1290 getTargetCacheParameters(TTI); 1291 // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf, 1292 // it requires information about the first two levels of a cache to determine 1293 // all the parameters of a macro-kernel. It also checks that an associativity 1294 // degree of a cache level is greater than two. Otherwise, another algorithm 1295 // for determination of the parameters should be used. 1296 if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 && 1297 FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 && 1298 FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2)) 1299 return {1, 1, 1}; 1300 // The quotient should be greater than zero. 1301 if (PollyPatternMatchingNcQuotient <= 0) 1302 return {1, 1, 1}; 1303 int Car = floor( 1304 (FirstCacheLevelAssociativity - 1) / 1305 (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr)); 1306 1307 // Car can be computed to be zero since it is floor to int. 1308 // On Mac OS, division by 0 does not raise a signal. This causes negative 1309 // tile sizes to be computed. Prevent division by Cac==0 by early returning 1310 // if this happens. 1311 if (Car == 0) 1312 return {1, 1, 1}; 1313 1314 auto ElementSize = getMatMulAlignTypeSize(MMI); 1315 assert(ElementSize > 0 && "The element size of the matrix multiplication " 1316 "operands should be greater than zero."); 1317 int Kc = (Car * FirstCacheLevelSize) / 1318 (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize); 1319 double Cac = 1320 static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) / 1321 SecondCacheLevelSize; 1322 int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac); 1323 int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr; 1324 1325 assert(Mc > 0 && Nc > 0 && Kc > 0 && 1326 "Matrix block sizes should be greater than zero"); 1327 return {Mc, Nc, Kc}; 1328 } 1329 1330 /// Create an access relation that is specific to 1331 /// the matrix multiplication pattern. 1332 /// 1333 /// Create an access relation of the following form: 1334 /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ] 1335 /// where I is @p FirstDim, J is @p SecondDim. 1336 /// 1337 /// It can be used, for example, to create relations that helps to consequently 1338 /// access elements of operands of a matrix multiplication after creation of 1339 /// the BLIS micro and macro kernels. 1340 /// 1341 /// @see ScheduleTreeOptimizer::createMicroKernel 1342 /// @see ScheduleTreeOptimizer::createMacroKernel 1343 /// 1344 /// Subsequently, the described access relation is applied to the range of 1345 /// @p MapOldIndVar, that is used to map original induction variables to 1346 /// the ones, which are produced by schedule transformations. It helps to 1347 /// define relations using a new space and, at the same time, keep them 1348 /// in the original one. 1349 /// 1350 /// @param MapOldIndVar The relation, which maps original induction variables 1351 /// to the ones, which are produced by schedule 1352 /// transformations. 1353 /// @param FirstDim, SecondDim The input dimensions that are used to define 1354 /// the specified access relation. 1355 /// @return The specified access relation. 1356 isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim, 1357 unsigned SecondDim) { 1358 auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3); 1359 auto AccessRel = isl::map::universe(AccessRelSpace); 1360 AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0); 1361 AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1); 1362 AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2); 1363 return MapOldIndVar.apply_range(AccessRel); 1364 } 1365 1366 isl::schedule_node createExtensionNode(isl::schedule_node Node, 1367 isl::map ExtensionMap) { 1368 auto Extension = isl::union_map(ExtensionMap); 1369 auto NewNode = isl::schedule_node::from_extension(Extension); 1370 return Node.graft_before(NewNode); 1371 } 1372 1373 /// Apply the packing transformation. 1374 /// 1375 /// The packing transformation can be described as a data-layout 1376 /// transformation that requires to introduce a new array, copy data 1377 /// to the array, and change memory access locations to reference the array. 1378 /// It can be used to ensure that elements of the new array are read in-stride 1379 /// access, aligned to cache lines boundaries, and preloaded into certain cache 1380 /// levels. 1381 /// 1382 /// As an example let us consider the packing of the array A that would help 1383 /// to read its elements with in-stride access. An access to the array A 1384 /// is represented by an access relation that has the form 1385 /// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has 1386 /// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr), 1387 /// k mod Kc, j mod Nr, i mod Mr]. 1388 /// 1389 /// To ensure that elements of the array A are read in-stride access, we add 1390 /// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using 1391 /// Scop::createScopArrayInfo, change the access relation 1392 /// S[i, j, k] -> A[i, k] to 1393 /// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using 1394 /// MemoryAccess::setNewAccessRelation, and copy the data to the array, using 1395 /// the copy statement created by Scop::addScopStmt. 1396 /// 1397 /// @param Node The schedule node to be optimized. 1398 /// @param MapOldIndVar The relation, which maps original induction variables 1399 /// to the ones, which are produced by schedule 1400 /// transformations. 1401 /// @param MicroParams, MacroParams Parameters of the BLIS kernel 1402 /// to be taken into account. 1403 /// @param MMI Parameters of the matrix multiplication operands. 1404 /// @return The optimized schedule node. 1405 static isl::schedule_node 1406 optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar, 1407 MicroKernelParamsTy MicroParams, 1408 MacroKernelParamsTy MacroParams, 1409 MatMulInfoTy &MMI) { 1410 auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in); 1411 auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user()); 1412 1413 // Create a copy statement that corresponds to the memory access to the 1414 // matrix B, the second operand of the matrix multiplication. 1415 Node = Node.parent().parent().parent().parent().parent().parent(); 1416 Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0); 1417 auto AccRel = getMatMulAccRel(MapOldIndVar, 3, 7); 1418 unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr; 1419 unsigned SecondDimSize = MacroParams.Kc; 1420 unsigned ThirdDimSize = MicroParams.Nr; 1421 auto *SAI = Stmt->getParent()->createScopArrayInfo( 1422 MMI.B->getElementType(), "Packed_B", 1423 {FirstDimSize, SecondDimSize, ThirdDimSize}); 1424 AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId()); 1425 auto OldAcc = MMI.B->getLatestAccessRelation(); 1426 MMI.B->setNewAccessRelation(AccRel); 1427 auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2, 1428 MapOldIndVar.dim(isl::dim::out) - 2); 1429 ExtMap = ExtMap.reverse(); 1430 ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0); 1431 auto Domain = Stmt->getDomain(); 1432 1433 // Restrict the domains of the copy statements to only execute when also its 1434 // originating statement is executed. 1435 auto DomainId = Domain.get_tuple_id(); 1436 auto *NewStmt = Stmt->getParent()->addScopStmt( 1437 OldAcc, MMI.B->getLatestAccessRelation(), Domain); 1438 ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId); 1439 ExtMap = ExtMap.intersect_range(Domain); 1440 ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId()); 1441 Node = createExtensionNode(Node, ExtMap); 1442 1443 // Create a copy statement that corresponds to the memory access 1444 // to the matrix A, the first operand of the matrix multiplication. 1445 Node = Node.child(0); 1446 AccRel = getMatMulAccRel(MapOldIndVar, 4, 6); 1447 FirstDimSize = MacroParams.Mc / MicroParams.Mr; 1448 ThirdDimSize = MicroParams.Mr; 1449 SAI = Stmt->getParent()->createScopArrayInfo( 1450 MMI.A->getElementType(), "Packed_A", 1451 {FirstDimSize, SecondDimSize, ThirdDimSize}); 1452 AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId()); 1453 OldAcc = MMI.A->getLatestAccessRelation(); 1454 MMI.A->setNewAccessRelation(AccRel); 1455 ExtMap = MapOldIndVar.project_out(isl::dim::out, 3, 1456 MapOldIndVar.dim(isl::dim::out) - 3); 1457 ExtMap = ExtMap.reverse(); 1458 ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0); 1459 NewStmt = Stmt->getParent()->addScopStmt( 1460 OldAcc, MMI.A->getLatestAccessRelation(), Domain); 1461 1462 // Restrict the domains of the copy statements to only execute when also its 1463 // originating statement is executed. 1464 ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId); 1465 ExtMap = ExtMap.intersect_range(Domain); 1466 ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId()); 1467 Node = createExtensionNode(Node, ExtMap); 1468 return Node.child(0).child(0).child(0).child(0).child(0); 1469 } 1470 1471 /// Get a relation mapping induction variables produced by schedule 1472 /// transformations to the original ones. 1473 /// 1474 /// @param Node The schedule node produced as the result of creation 1475 /// of the BLIS kernels. 1476 /// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel 1477 /// to be taken into account. 1478 /// @return The relation mapping original induction variables to the ones 1479 /// produced by schedule transformation. 1480 /// @see ScheduleTreeOptimizer::createMicroKernel 1481 /// @see ScheduleTreeOptimizer::createMacroKernel 1482 /// @see getMacroKernelParams 1483 isl::map 1484 getInductionVariablesSubstitution(isl::schedule_node Node, 1485 MicroKernelParamsTy MicroKernelParams, 1486 MacroKernelParamsTy MacroKernelParams) { 1487 auto Child = Node.child(0); 1488 auto UnMapOldIndVar = Child.get_prefix_schedule_union_map(); 1489 auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar); 1490 if (MapOldIndVar.dim(isl::dim::out) > 9) 1491 return MapOldIndVar.project_out(isl::dim::out, 0, 1492 MapOldIndVar.dim(isl::dim::out) - 9); 1493 return MapOldIndVar; 1494 } 1495 1496 /// Isolate a set of partial tile prefixes and unroll the isolated part. 1497 /// 1498 /// The set should ensure that it contains only partial tile prefixes that have 1499 /// exactly Mr x Nr iterations of the two innermost loops produced by 1500 /// the optimization of the matrix multiplication. Mr and Nr are parameters of 1501 /// the micro-kernel. 1502 /// 1503 /// In case of parametric bounds, this helps to auto-vectorize the unrolled 1504 /// innermost loops, using the SLP vectorizer. 1505 /// 1506 /// @param Node The schedule node to be modified. 1507 /// @param MicroKernelParams Parameters of the micro-kernel 1508 /// to be taken into account. 1509 /// @return The modified isl_schedule_node. 1510 static isl::schedule_node 1511 isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node, 1512 struct MicroKernelParamsTy MicroKernelParams) { 1513 isl::schedule_node Child = Node.get_child(0); 1514 isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation(); 1515 isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range(); 1516 isl_size Dims = Prefix.dim(isl::dim::set); 1517 Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1); 1518 Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr); 1519 Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr); 1520 1521 isl::union_set IsolateOption = 1522 getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3); 1523 isl::ctx Ctx = Node.get_ctx(); 1524 auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll")); 1525 Options = Options.unite(getUnrollIsolatedSetOptions(Ctx)); 1526 Node = Node.band_set_ast_build_options(Options); 1527 Node = Node.parent().parent().parent(); 1528 IsolateOption = getIsolateOptions(Prefix, 3); 1529 Options = IsolateOption.unite(getDimOptions(Ctx, "separate")); 1530 Node = Node.band_set_ast_build_options(Options); 1531 Node = Node.child(0).child(0).child(0); 1532 return Node; 1533 } 1534 1535 /// Mark @p BasePtr with "Inter iteration alias-free" mark node. 1536 /// 1537 /// @param Node The child of the mark node to be inserted. 1538 /// @param BasePtr The pointer to be marked. 1539 /// @return The modified isl_schedule_node. 1540 static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node, 1541 Value *BasePtr) { 1542 if (!BasePtr) 1543 return Node; 1544 1545 auto Id = 1546 isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr); 1547 return Node.insert_mark(Id).child(0); 1548 } 1549 1550 /// Insert "Loop Vectorizer Disabled" mark node. 1551 /// 1552 /// @param Node The child of the mark node to be inserted. 1553 /// @return The modified isl_schedule_node. 1554 static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) { 1555 auto Id = isl::id::alloc(Node.get_ctx(), "Loop Vectorizer Disabled", nullptr); 1556 return Node.insert_mark(Id).child(0); 1557 } 1558 1559 /// Restore the initial ordering of dimensions of the band node 1560 /// 1561 /// In case the band node represents all the dimensions of the iteration 1562 /// domain, recreate the band node to restore the initial ordering of the 1563 /// dimensions. 1564 /// 1565 /// @param Node The band node to be modified. 1566 /// @return The modified schedule node. 1567 static isl::schedule_node 1568 getBandNodeWithOriginDimOrder(isl::schedule_node Node) { 1569 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band); 1570 if (isl_schedule_node_get_type(Node.child(0).get()) != isl_schedule_node_leaf) 1571 return Node; 1572 auto Domain = Node.get_universe_domain(); 1573 assert(isl_union_set_n_set(Domain.get()) == 1); 1574 if (Node.get_schedule_depth() != 0 || 1575 (isl::set(Domain).dim(isl::dim::set) != 1576 isl_schedule_node_band_n_member(Node.get()))) 1577 return Node; 1578 Node = isl::manage(isl_schedule_node_delete(Node.copy())); 1579 auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff(); 1580 auto PartialScheduleMultiPwAff = 1581 isl::multi_union_pw_aff(PartialSchedulePwAff); 1582 PartialScheduleMultiPwAff = 1583 PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set); 1584 return Node.insert_partial_schedule(PartialScheduleMultiPwAff); 1585 } 1586 1587 isl::schedule_node 1588 ScheduleTreeOptimizer::optimizeMatMulPattern(isl::schedule_node Node, 1589 const TargetTransformInfo *TTI, 1590 MatMulInfoTy &MMI) { 1591 assert(TTI && "The target transform info should be provided."); 1592 Node = markInterIterationAliasFree( 1593 Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr()); 1594 int DimOutNum = isl_schedule_node_band_n_member(Node.get()); 1595 assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest " 1596 "and, consequently, the corresponding scheduling " 1597 "functions have at least three dimensions."); 1598 Node = getBandNodeWithOriginDimOrder(Node); 1599 Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3); 1600 int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j; 1601 int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k; 1602 Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2); 1603 NewK = NewK == DimOutNum - 2 ? NewJ : NewK; 1604 Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1); 1605 auto MicroKernelParams = getMicroKernelParams(TTI, MMI); 1606 auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI); 1607 Node = createMacroKernel(Node, MacroKernelParams); 1608 Node = createMicroKernel(Node, MicroKernelParams); 1609 if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 || 1610 MacroKernelParams.Kc == 1) 1611 return Node; 1612 auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams, 1613 MacroKernelParams); 1614 if (!MapOldIndVar) 1615 return Node; 1616 Node = markLoopVectorizerDisabled(Node.parent()).child(0); 1617 Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams); 1618 return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams, 1619 MacroKernelParams, MMI); 1620 } 1621 1622 bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node, 1623 const Dependences *D, 1624 MatMulInfoTy &MMI) { 1625 auto PartialSchedule = isl::manage( 1626 isl_schedule_node_band_get_partial_schedule_union_map(Node.get())); 1627 Node = Node.child(0); 1628 auto LeafType = isl_schedule_node_get_type(Node.get()); 1629 Node = Node.parent(); 1630 if (LeafType != isl_schedule_node_leaf || 1631 isl_schedule_node_band_n_member(Node.get()) < 3 || 1632 Node.get_schedule_depth() != 0 || 1633 isl_union_map_n_map(PartialSchedule.get()) != 1) 1634 return false; 1635 auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule); 1636 if (containsMatrMult(NewPartialSchedule, D, MMI)) 1637 return true; 1638 return false; 1639 } 1640 1641 __isl_give isl_schedule_node * 1642 ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node, 1643 void *User) { 1644 if (!isTileableBandNode(isl::manage_copy(Node))) 1645 return Node; 1646 1647 const OptimizerAdditionalInfoTy *OAI = 1648 static_cast<const OptimizerAdditionalInfoTy *>(User); 1649 1650 MatMulInfoTy MMI; 1651 if (PMBasedOpts && User && 1652 isMatrMultPattern(isl::manage_copy(Node), OAI->D, MMI)) { 1653 LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n"); 1654 MatMulOpts++; 1655 return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release(); 1656 } 1657 1658 return standardBandOpts(isl::manage(Node), User).release(); 1659 } 1660 1661 isl::schedule 1662 ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule, 1663 const OptimizerAdditionalInfoTy *OAI) { 1664 auto Root = Schedule.get_root(); 1665 Root = optimizeScheduleNode(Root, OAI); 1666 return Root.get_schedule(); 1667 } 1668 1669 isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode( 1670 isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) { 1671 Node = isl::manage(isl_schedule_node_map_descendant_bottom_up( 1672 Node.release(), optimizeBand, 1673 const_cast<void *>(static_cast<const void *>(OAI)))); 1674 return Node; 1675 } 1676 1677 bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S, 1678 isl::schedule NewSchedule) { 1679 // To understand if the schedule has been optimized we check if the schedule 1680 // has changed at all. 1681 // TODO: We can improve this by tracking if any necessarily beneficial 1682 // transformations have been performed. This can e.g. be tiling, loop 1683 // interchange, or ...) We can track this either at the place where the 1684 // transformation has been performed or, in case of automatic ILP based 1685 // optimizations, by comparing (yet to be defined) performance metrics 1686 // before/after the scheduling optimizer 1687 // (e.g., #stride-one accesses) 1688 auto NewScheduleMap = NewSchedule.get_map(); 1689 auto OldSchedule = S.getSchedule(); 1690 assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes " 1691 "that make Scop::getSchedule() return nullptr."); 1692 bool changed = !OldSchedule.is_equal(NewScheduleMap); 1693 return changed; 1694 } 1695 1696 class IslScheduleOptimizerWrapperPass : public ScopPass { 1697 public: 1698 static char ID; 1699 1700 explicit IslScheduleOptimizerWrapperPass() : ScopPass(ID) {} 1701 1702 ~IslScheduleOptimizerWrapperPass() override { releaseMemory(); } 1703 1704 /// Optimize the schedule of the SCoP @p S. 1705 bool runOnScop(Scop &S) override; 1706 1707 /// Print the new schedule for the SCoP @p S. 1708 void printScop(raw_ostream &OS, Scop &S) const override; 1709 1710 /// Register all analyses and transformation required. 1711 void getAnalysisUsage(AnalysisUsage &AU) const override; 1712 1713 /// Release the internal memory. 1714 void releaseMemory() override { 1715 LastSchedule = nullptr; 1716 IslCtx.reset(); 1717 } 1718 1719 private: 1720 std::shared_ptr<isl_ctx> IslCtx; 1721 isl::schedule LastSchedule; 1722 }; 1723 1724 char IslScheduleOptimizerWrapperPass::ID = 0; 1725 1726 #ifndef NDEBUG 1727 static void printSchedule(llvm::raw_ostream &OS, const isl::schedule &Schedule, 1728 StringRef Desc) { 1729 isl::ctx Ctx = Schedule.get_ctx(); 1730 isl_printer *P = isl_printer_to_str(Ctx.get()); 1731 P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK); 1732 P = isl_printer_print_schedule(P, Schedule.get()); 1733 char *Str = isl_printer_get_str(P); 1734 OS << Desc << ": \n" << Str << "\n"; 1735 free(Str); 1736 isl_printer_free(P); 1737 } 1738 #endif 1739 1740 /// Collect statistics for the schedule tree. 1741 /// 1742 /// @param Schedule The schedule tree to analyze. If not a schedule tree it is 1743 /// ignored. 1744 /// @param Version The version of the schedule tree that is analyzed. 1745 /// 0 for the original schedule tree before any transformation. 1746 /// 1 for the schedule tree after isl's rescheduling. 1747 /// 2 for the schedule tree after optimizations are applied 1748 /// (tiling, pattern matching) 1749 static void walkScheduleTreeForStatistics(isl::schedule Schedule, int Version) { 1750 auto Root = Schedule.get_root(); 1751 if (!Root) 1752 return; 1753 1754 isl_schedule_node_foreach_descendant_top_down( 1755 Root.get(), 1756 [](__isl_keep isl_schedule_node *nodeptr, void *user) -> isl_bool { 1757 isl::schedule_node Node = isl::manage_copy(nodeptr); 1758 int Version = *static_cast<int *>(user); 1759 1760 switch (isl_schedule_node_get_type(Node.get())) { 1761 case isl_schedule_node_band: { 1762 NumBands[Version]++; 1763 if (isl_schedule_node_band_get_permutable(Node.get()) == 1764 isl_bool_true) 1765 NumPermutable[Version]++; 1766 1767 int CountMembers = isl_schedule_node_band_n_member(Node.get()); 1768 NumBandMembers[Version] += CountMembers; 1769 for (int i = 0; i < CountMembers; i += 1) { 1770 if (Node.band_member_get_coincident(i)) 1771 NumCoincident[Version]++; 1772 } 1773 break; 1774 } 1775 1776 case isl_schedule_node_filter: 1777 NumFilters[Version]++; 1778 break; 1779 1780 case isl_schedule_node_extension: 1781 NumExtension[Version]++; 1782 break; 1783 1784 default: 1785 break; 1786 } 1787 1788 return isl_bool_true; 1789 }, 1790 &Version); 1791 } 1792 1793 static bool runIslScheduleOptimizer( 1794 Scop &S, 1795 function_ref<const Dependences &(Dependences::AnalysisLevel)> GetDeps, 1796 TargetTransformInfo *TTI, isl::schedule &LastSchedule) { 1797 // Skip SCoPs in case they're already optimised by PPCGCodeGeneration 1798 if (S.isToBeSkipped()) 1799 return false; 1800 1801 // Skip empty SCoPs but still allow code generation as it will delete the 1802 // loops present but not needed. 1803 if (S.getSize() == 0) { 1804 S.markAsOptimized(); 1805 return false; 1806 } 1807 1808 ScopsProcessed++; 1809 1810 // Schedule without optimizations. 1811 isl::schedule Schedule = S.getScheduleTree(); 1812 walkScheduleTreeForStatistics(S.getScheduleTree(), 0); 1813 LLVM_DEBUG(printSchedule(dbgs(), Schedule, "Original schedule tree")); 1814 1815 bool HasUserTransformation = false; 1816 if (PragmaBasedOpts) { 1817 isl::schedule ManuallyTransformed = 1818 applyManualTransformations(&S, Schedule); 1819 if (!ManuallyTransformed) { 1820 LLVM_DEBUG(dbgs() << "Error during manual optimization\n"); 1821 return false; 1822 } 1823 1824 if (ManuallyTransformed.get() != Schedule.get()) { 1825 // User transformations have precedence over other transformations. 1826 HasUserTransformation = true; 1827 Schedule = std::move(ManuallyTransformed); 1828 LLVM_DEBUG( 1829 printSchedule(dbgs(), Schedule, "After manual transformations")); 1830 } 1831 } 1832 1833 // Only continue if either manual transformations have been applied or we are 1834 // allowed to apply heuristics. 1835 // TODO: Detect disabled heuristics and no user-directed transformation 1836 // metadata earlier in ScopDetection. 1837 if (!HasUserTransformation && S.hasDisableHeuristicsHint()) { 1838 LLVM_DEBUG(dbgs() << "Heuristic optimizations disabled by metadata\n"); 1839 return false; 1840 } 1841 1842 // Get dependency analysis. 1843 const Dependences &D = GetDeps(Dependences::AL_Statement); 1844 if (D.getSharedIslCtx() != S.getSharedIslCtx()) { 1845 LLVM_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n"); 1846 return false; 1847 } 1848 if (!D.hasValidDependences()) { 1849 LLVM_DEBUG(dbgs() << "Dependency information not available\n"); 1850 return false; 1851 } 1852 1853 // Apply ISL's algorithm only if not overriden by the user. Note that 1854 // post-rescheduling optimizations (tiling, pattern-based, prevectorization) 1855 // rely on the coincidence/permutable annotations on schedule tree bands that 1856 // are added by the rescheduling analyzer. Therefore, disabling the 1857 // rescheduler implicitly also disables these optimizations. 1858 if (HasUserTransformation) { 1859 LLVM_DEBUG( 1860 dbgs() << "Skipping rescheduling due to manual transformation\n"); 1861 } else { 1862 // Build input data. 1863 int ValidityKinds = 1864 Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW; 1865 int ProximityKinds; 1866 1867 if (OptimizeDeps == "all") 1868 ProximityKinds = 1869 Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW; 1870 else if (OptimizeDeps == "raw") 1871 ProximityKinds = Dependences::TYPE_RAW; 1872 else { 1873 errs() << "Do not know how to optimize for '" << OptimizeDeps << "'" 1874 << " Falling back to optimizing all dependences.\n"; 1875 ProximityKinds = 1876 Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW; 1877 } 1878 1879 isl::union_set Domain = S.getDomains(); 1880 1881 if (!Domain) 1882 return false; 1883 1884 isl::union_map Validity = D.getDependences(ValidityKinds); 1885 isl::union_map Proximity = D.getDependences(ProximityKinds); 1886 1887 // Simplify the dependences by removing the constraints introduced by the 1888 // domains. This can speed up the scheduling time significantly, as large 1889 // constant coefficients will be removed from the dependences. The 1890 // introduction of some additional dependences reduces the possible 1891 // transformations, but in most cases, such transformation do not seem to be 1892 // interesting anyway. In some cases this option may stop the scheduler to 1893 // find any schedule. 1894 if (SimplifyDeps == "yes") { 1895 Validity = Validity.gist_domain(Domain); 1896 Validity = Validity.gist_range(Domain); 1897 Proximity = Proximity.gist_domain(Domain); 1898 Proximity = Proximity.gist_range(Domain); 1899 } else if (SimplifyDeps != "no") { 1900 errs() 1901 << "warning: Option -polly-opt-simplify-deps should either be 'yes' " 1902 "or 'no'. Falling back to default: 'yes'\n"; 1903 } 1904 1905 LLVM_DEBUG(dbgs() << "\n\nCompute schedule from: "); 1906 LLVM_DEBUG(dbgs() << "Domain := " << Domain << ";\n"); 1907 LLVM_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n"); 1908 LLVM_DEBUG(dbgs() << "Validity := " << Validity << ";\n"); 1909 1910 unsigned IslSerializeSCCs; 1911 1912 if (FusionStrategy == "max") { 1913 IslSerializeSCCs = 0; 1914 } else if (FusionStrategy == "min") { 1915 IslSerializeSCCs = 1; 1916 } else { 1917 errs() << "warning: Unknown fusion strategy. Falling back to maximal " 1918 "fusion.\n"; 1919 IslSerializeSCCs = 0; 1920 } 1921 1922 int IslMaximizeBands; 1923 1924 if (MaximizeBandDepth == "yes") { 1925 IslMaximizeBands = 1; 1926 } else if (MaximizeBandDepth == "no") { 1927 IslMaximizeBands = 0; 1928 } else { 1929 errs() 1930 << "warning: Option -polly-opt-maximize-bands should either be 'yes'" 1931 " or 'no'. Falling back to default: 'yes'\n"; 1932 IslMaximizeBands = 1; 1933 } 1934 1935 int IslOuterCoincidence; 1936 1937 if (OuterCoincidence == "yes") { 1938 IslOuterCoincidence = 1; 1939 } else if (OuterCoincidence == "no") { 1940 IslOuterCoincidence = 0; 1941 } else { 1942 errs() << "warning: Option -polly-opt-outer-coincidence should either be " 1943 "'yes' or 'no'. Falling back to default: 'no'\n"; 1944 IslOuterCoincidence = 0; 1945 } 1946 1947 isl_ctx *Ctx = S.getIslCtx().get(); 1948 1949 isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence); 1950 isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs); 1951 isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands); 1952 isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm); 1953 isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient); 1954 isl_options_set_tile_scale_tile_loops(Ctx, 0); 1955 1956 auto OnErrorStatus = isl_options_get_on_error(Ctx); 1957 isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE); 1958 1959 auto SC = isl::schedule_constraints::on_domain(Domain); 1960 SC = SC.set_proximity(Proximity); 1961 SC = SC.set_validity(Validity); 1962 SC = SC.set_coincidence(Validity); 1963 Schedule = SC.compute_schedule(); 1964 isl_options_set_on_error(Ctx, OnErrorStatus); 1965 1966 ScopsRescheduled++; 1967 LLVM_DEBUG(printSchedule(dbgs(), Schedule, "After rescheduling")); 1968 } 1969 1970 walkScheduleTreeForStatistics(Schedule, 1); 1971 1972 // In cases the scheduler is not able to optimize the code, we just do not 1973 // touch the schedule. 1974 if (!Schedule) 1975 return false; 1976 1977 // Apply post-rescheduling optimizations. 1978 const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)}; 1979 Schedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI); 1980 Schedule = hoistExtensionNodes(Schedule); 1981 LLVM_DEBUG(printSchedule(dbgs(), Schedule, "After post-optimizations")); 1982 walkScheduleTreeForStatistics(Schedule, 2); 1983 1984 if (!ScheduleTreeOptimizer::isProfitableSchedule(S, Schedule)) 1985 return false; 1986 1987 auto ScopStats = S.getStatistics(); 1988 ScopsOptimized++; 1989 NumAffineLoopsOptimized += ScopStats.NumAffineLoops; 1990 NumBoxedLoopsOptimized += ScopStats.NumBoxedLoops; 1991 LastSchedule = Schedule; 1992 1993 S.setScheduleTree(Schedule); 1994 S.markAsOptimized(); 1995 1996 if (OptimizedScops) 1997 errs() << S; 1998 1999 return false; 2000 } 2001 2002 bool IslScheduleOptimizerWrapperPass::runOnScop(Scop &S) { 2003 releaseMemory(); 2004 2005 Function &F = S.getFunction(); 2006 IslCtx = S.getSharedIslCtx(); 2007 2008 auto getDependences = 2009 [this](Dependences::AnalysisLevel) -> const Dependences & { 2010 return getAnalysis<DependenceInfo>().getDependences( 2011 Dependences::AL_Statement); 2012 }; 2013 // auto &Deps = getAnalysis<DependenceInfo>(); 2014 TargetTransformInfo *TTI = 2015 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 2016 return runIslScheduleOptimizer(S, getDependences, TTI, LastSchedule); 2017 } 2018 2019 static void runScheduleOptimizerPrinter(raw_ostream &OS, 2020 isl::schedule LastSchedule) { 2021 isl_printer *p; 2022 char *ScheduleStr; 2023 2024 OS << "Calculated schedule:\n"; 2025 2026 if (!LastSchedule) { 2027 OS << "n/a\n"; 2028 return; 2029 } 2030 2031 p = isl_printer_to_str(LastSchedule.get_ctx().get()); 2032 p = isl_printer_set_yaml_style(p, ISL_YAML_STYLE_BLOCK); 2033 p = isl_printer_print_schedule(p, LastSchedule.get()); 2034 ScheduleStr = isl_printer_get_str(p); 2035 isl_printer_free(p); 2036 2037 OS << ScheduleStr << "\n"; 2038 2039 free(ScheduleStr); 2040 } 2041 2042 void IslScheduleOptimizerWrapperPass::printScop(raw_ostream &OS, Scop &) const { 2043 runScheduleOptimizerPrinter(OS, LastSchedule); 2044 } 2045 2046 void IslScheduleOptimizerWrapperPass::getAnalysisUsage( 2047 AnalysisUsage &AU) const { 2048 ScopPass::getAnalysisUsage(AU); 2049 AU.addRequired<DependenceInfo>(); 2050 AU.addRequired<TargetTransformInfoWrapperPass>(); 2051 2052 AU.addPreserved<DependenceInfo>(); 2053 } 2054 2055 } // namespace 2056 2057 Pass *polly::createIslScheduleOptimizerWrapperPass() { 2058 return new IslScheduleOptimizerWrapperPass(); 2059 } 2060 2061 INITIALIZE_PASS_BEGIN(IslScheduleOptimizerWrapperPass, "polly-opt-isl", 2062 "Polly - Optimize schedule of SCoP", false, false); 2063 INITIALIZE_PASS_DEPENDENCY(DependenceInfo); 2064 INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass); 2065 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass); 2066 INITIALIZE_PASS_END(IslScheduleOptimizerWrapperPass, "polly-opt-isl", 2067 "Polly - Optimize schedule of SCoP", false, false) 2068 2069 static llvm::PreservedAnalyses 2070 runIslScheduleOptimizerUsingNPM(Scop &S, ScopAnalysisManager &SAM, 2071 ScopStandardAnalysisResults &SAR, SPMUpdater &U, 2072 raw_ostream *OS) { 2073 DependenceAnalysis::Result &Deps = SAM.getResult<DependenceAnalysis>(S, SAR); 2074 auto GetDeps = [&Deps](Dependences::AnalysisLevel) -> const Dependences & { 2075 return Deps.getDependences(Dependences::AL_Statement); 2076 }; 2077 TargetTransformInfo *TTI = &SAR.TTI; 2078 isl::schedule LastSchedule; 2079 bool Modified = runIslScheduleOptimizer(S, GetDeps, TTI, LastSchedule); 2080 if (OS) { 2081 *OS << "Printing analysis 'Polly - Optimize schedule of SCoP' for region: '" 2082 << S.getName() << "' in function '" << S.getFunction().getName() 2083 << "':\n"; 2084 runScheduleOptimizerPrinter(*OS, LastSchedule); 2085 } 2086 2087 if (!Modified) 2088 return PreservedAnalyses::all(); 2089 2090 PreservedAnalyses PA; 2091 PA.preserveSet<AllAnalysesOn<Module>>(); 2092 PA.preserveSet<AllAnalysesOn<Function>>(); 2093 PA.preserveSet<AllAnalysesOn<Loop>>(); 2094 return PA; 2095 } 2096 2097 llvm::PreservedAnalyses 2098 IslScheduleOptimizerPass::run(Scop &S, ScopAnalysisManager &SAM, 2099 ScopStandardAnalysisResults &SAR, SPMUpdater &U) { 2100 return runIslScheduleOptimizerUsingNPM(S, SAM, SAR, U, nullptr); 2101 } 2102 2103 llvm::PreservedAnalyses 2104 IslScheduleOptimizerPrinterPass::run(Scop &S, ScopAnalysisManager &SAM, 2105 ScopStandardAnalysisResults &SAR, 2106 SPMUpdater &U) { 2107 return runIslScheduleOptimizerUsingNPM(S, SAM, SAR, U, &OS); 2108 } 2109