1 //===- Schedule.cpp - Calculate an optimized schedule ---------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This pass generates an entirely new schedule tree from the data dependences 10 // and iteration domains. The new schedule tree is computed in two steps: 11 // 12 // 1) The isl scheduling optimizer is run 13 // 14 // The isl scheduling optimizer creates a new schedule tree that maximizes 15 // parallelism and tileability and minimizes data-dependence distances. The 16 // algorithm used is a modified version of the ``Pluto'' algorithm: 17 // 18 // U. Bondhugula, A. Hartono, J. Ramanujam, and P. Sadayappan. 19 // A Practical Automatic Polyhedral Parallelizer and Locality Optimizer. 20 // In Proceedings of the 2008 ACM SIGPLAN Conference On Programming Language 21 // Design and Implementation, PLDI ’08, pages 101–113. ACM, 2008. 22 // 23 // 2) A set of post-scheduling transformations is applied on the schedule tree. 24 // 25 // These optimizations include: 26 // 27 // - Tiling of the innermost tilable bands 28 // - Prevectorization - The choice of a possible outer loop that is strip-mined 29 // to the innermost level to enable inner-loop 30 // vectorization. 31 // - Some optimizations for spatial locality are also planned. 
32 // 33 // For a detailed description of the schedule tree itself please see section 6 34 // of: 35 // 36 // Polyhedral AST generation is more than scanning polyhedra 37 // Tobias Grosser, Sven Verdoolaege, Albert Cohen 38 // ACM Transactions on Programming Languages and Systems (TOPLAS), 39 // 37(4), July 2015 40 // http://www.grosser.es/#pub-polyhedral-AST-generation 41 // 42 // This publication also contains a detailed discussion of the different options 43 // for polyhedral loop unrolling, full/partial tile separation and other uses 44 // of the schedule tree. 45 // 46 //===----------------------------------------------------------------------===// 47 48 #include "polly/ScheduleOptimizer.h" 49 #include "polly/CodeGen/CodeGeneration.h" 50 #include "polly/DependenceInfo.h" 51 #include "polly/LinkAllPasses.h" 52 #include "polly/Options.h" 53 #include "polly/ScheduleTreeTransform.h" 54 #include "polly/ScopInfo.h" 55 #include "polly/ScopPass.h" 56 #include "polly/Simplify.h" 57 #include "polly/Support/ISLOStream.h" 58 #include "llvm/ADT/Statistic.h" 59 #include "llvm/Analysis/TargetTransformInfo.h" 60 #include "llvm/IR/Function.h" 61 #include "llvm/InitializePasses.h" 62 #include "llvm/Support/CommandLine.h" 63 #include "llvm/Support/Debug.h" 64 #include "llvm/Support/raw_ostream.h" 65 #include "isl/ctx.h" 66 #include "isl/options.h" 67 #include "isl/printer.h" 68 #include "isl/schedule.h" 69 #include "isl/schedule_node.h" 70 #include "isl/union_map.h" 71 #include "isl/union_set.h" 72 #include <algorithm> 73 #include <cassert> 74 #include <cmath> 75 #include <cstdint> 76 #include <cstdlib> 77 #include <string> 78 #include <vector> 79 80 using namespace llvm; 81 using namespace polly; 82 83 #define DEBUG_TYPE "polly-opt-isl" 84 85 static cl::opt<std::string> 86 OptimizeDeps("polly-opt-optimize-only", 87 cl::desc("Only a certain kind of dependences (all/raw)"), 88 cl::Hidden, cl::init("all"), cl::ZeroOrMore, 89 cl::cat(PollyCategory)); 90 91 static 
cl::opt<std::string> 92 SimplifyDeps("polly-opt-simplify-deps", 93 cl::desc("Dependences should be simplified (yes/no)"), 94 cl::Hidden, cl::init("yes"), cl::ZeroOrMore, 95 cl::cat(PollyCategory)); 96 97 static cl::opt<int> MaxConstantTerm( 98 "polly-opt-max-constant-term", 99 cl::desc("The maximal constant term allowed (-1 is unlimited)"), cl::Hidden, 100 cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory)); 101 102 static cl::opt<int> MaxCoefficient( 103 "polly-opt-max-coefficient", 104 cl::desc("The maximal coefficient allowed (-1 is unlimited)"), cl::Hidden, 105 cl::init(20), cl::ZeroOrMore, cl::cat(PollyCategory)); 106 107 static cl::opt<std::string> FusionStrategy( 108 "polly-opt-fusion", cl::desc("The fusion strategy to choose (min/max)"), 109 cl::Hidden, cl::init("min"), cl::ZeroOrMore, cl::cat(PollyCategory)); 110 111 static cl::opt<std::string> 112 MaximizeBandDepth("polly-opt-maximize-bands", 113 cl::desc("Maximize the band depth (yes/no)"), cl::Hidden, 114 cl::init("yes"), cl::ZeroOrMore, cl::cat(PollyCategory)); 115 116 static cl::opt<std::string> OuterCoincidence( 117 "polly-opt-outer-coincidence", 118 cl::desc("Try to construct schedules where the outer member of each band " 119 "satisfies the coincidence constraints (yes/no)"), 120 cl::Hidden, cl::init("no"), cl::ZeroOrMore, cl::cat(PollyCategory)); 121 122 static cl::opt<int> PrevectorWidth( 123 "polly-prevect-width", 124 cl::desc( 125 "The number of loop iterations to strip-mine for pre-vectorization"), 126 cl::Hidden, cl::init(4), cl::ZeroOrMore, cl::cat(PollyCategory)); 127 128 static cl::opt<bool> FirstLevelTiling("polly-tiling", 129 cl::desc("Enable loop tiling"), 130 cl::init(true), cl::ZeroOrMore, 131 cl::cat(PollyCategory)); 132 133 static cl::opt<int> LatencyVectorFma( 134 "polly-target-latency-vector-fma", 135 cl::desc("The minimal number of cycles between issuing two " 136 "dependent consecutive vector fused multiply-add " 137 "instructions."), 138 cl::Hidden, cl::init(8), 
cl::ZeroOrMore, cl::cat(PollyCategory)); 139 140 static cl::opt<int> ThroughputVectorFma( 141 "polly-target-throughput-vector-fma", 142 cl::desc("A throughput of the processor floating-point arithmetic units " 143 "expressed in the number of vector fused multiply-add " 144 "instructions per clock cycle."), 145 cl::Hidden, cl::init(1), cl::ZeroOrMore, cl::cat(PollyCategory)); 146 147 // This option, along with --polly-target-2nd-cache-level-associativity, 148 // --polly-target-1st-cache-level-size, and --polly-target-2st-cache-level-size 149 // represent the parameters of the target cache, which do not have typical 150 // values that can be used by default. However, to apply the pattern matching 151 // optimizations, we use the values of the parameters of Intel Core i7-3820 152 // SandyBridge in case the parameters are not specified or not provided by the 153 // TargetTransformInfo. 154 static cl::opt<int> FirstCacheLevelAssociativity( 155 "polly-target-1st-cache-level-associativity", 156 cl::desc("The associativity of the first cache level."), cl::Hidden, 157 cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 158 159 static cl::opt<int> FirstCacheLevelDefaultAssociativity( 160 "polly-target-1st-cache-level-default-associativity", 161 cl::desc("The default associativity of the first cache level" 162 " (if not enough were provided by the TargetTransformInfo)."), 163 cl::Hidden, cl::init(8), cl::ZeroOrMore, cl::cat(PollyCategory)); 164 165 static cl::opt<int> SecondCacheLevelAssociativity( 166 "polly-target-2nd-cache-level-associativity", 167 cl::desc("The associativity of the second cache level."), cl::Hidden, 168 cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 169 170 static cl::opt<int> SecondCacheLevelDefaultAssociativity( 171 "polly-target-2nd-cache-level-default-associativity", 172 cl::desc("The default associativity of the second cache level" 173 " (if not enough were provided by the TargetTransformInfo)."), 174 cl::Hidden, cl::init(8), 
cl::ZeroOrMore, cl::cat(PollyCategory)); 175 176 static cl::opt<int> FirstCacheLevelSize( 177 "polly-target-1st-cache-level-size", 178 cl::desc("The size of the first cache level specified in bytes."), 179 cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 180 181 static cl::opt<int> FirstCacheLevelDefaultSize( 182 "polly-target-1st-cache-level-default-size", 183 cl::desc("The default size of the first cache level specified in bytes" 184 " (if not enough were provided by the TargetTransformInfo)."), 185 cl::Hidden, cl::init(32768), cl::ZeroOrMore, cl::cat(PollyCategory)); 186 187 static cl::opt<int> SecondCacheLevelSize( 188 "polly-target-2nd-cache-level-size", 189 cl::desc("The size of the second level specified in bytes."), cl::Hidden, 190 cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 191 192 static cl::opt<int> SecondCacheLevelDefaultSize( 193 "polly-target-2nd-cache-level-default-size", 194 cl::desc("The default size of the second cache level specified in bytes" 195 " (if not enough were provided by the TargetTransformInfo)."), 196 cl::Hidden, cl::init(262144), cl::ZeroOrMore, cl::cat(PollyCategory)); 197 198 static cl::opt<int> VectorRegisterBitwidth( 199 "polly-target-vector-register-bitwidth", 200 cl::desc("The size in bits of a vector register (if not set, this " 201 "information is taken from LLVM's target information."), 202 cl::Hidden, cl::init(-1), cl::ZeroOrMore, cl::cat(PollyCategory)); 203 204 static cl::opt<int> FirstLevelDefaultTileSize( 205 "polly-default-tile-size", 206 cl::desc("The default tile size (if not enough were provided by" 207 " --polly-tile-sizes)"), 208 cl::Hidden, cl::init(32), cl::ZeroOrMore, cl::cat(PollyCategory)); 209 210 static cl::list<int> 211 FirstLevelTileSizes("polly-tile-sizes", 212 cl::desc("A tile size for each loop dimension, filled " 213 "with --polly-default-tile-size"), 214 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, 215 cl::cat(PollyCategory)); 216 217 static cl::opt<bool> 218 
SecondLevelTiling("polly-2nd-level-tiling", 219 cl::desc("Enable a 2nd level loop of loop tiling"), 220 cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); 221 222 static cl::opt<int> SecondLevelDefaultTileSize( 223 "polly-2nd-level-default-tile-size", 224 cl::desc("The default 2nd-level tile size (if not enough were provided by" 225 " --polly-2nd-level-tile-sizes)"), 226 cl::Hidden, cl::init(16), cl::ZeroOrMore, cl::cat(PollyCategory)); 227 228 static cl::list<int> 229 SecondLevelTileSizes("polly-2nd-level-tile-sizes", 230 cl::desc("A tile size for each loop dimension, filled " 231 "with --polly-default-tile-size"), 232 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, 233 cl::cat(PollyCategory)); 234 235 static cl::opt<bool> RegisterTiling("polly-register-tiling", 236 cl::desc("Enable register tiling"), 237 cl::init(false), cl::ZeroOrMore, 238 cl::cat(PollyCategory)); 239 240 static cl::opt<int> RegisterDefaultTileSize( 241 "polly-register-tiling-default-tile-size", 242 cl::desc("The default register tile size (if not enough were provided by" 243 " --polly-register-tile-sizes)"), 244 cl::Hidden, cl::init(2), cl::ZeroOrMore, cl::cat(PollyCategory)); 245 246 static cl::opt<int> PollyPatternMatchingNcQuotient( 247 "polly-pattern-matching-nc-quotient", 248 cl::desc("Quotient that is obtained by dividing Nc, the parameter of the" 249 "macro-kernel, by Nr, the parameter of the micro-kernel"), 250 cl::Hidden, cl::init(256), cl::ZeroOrMore, cl::cat(PollyCategory)); 251 252 static cl::list<int> 253 RegisterTileSizes("polly-register-tile-sizes", 254 cl::desc("A tile size for each loop dimension, filled " 255 "with --polly-register-tile-size"), 256 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated, 257 cl::cat(PollyCategory)); 258 259 static cl::opt<bool> 260 PMBasedOpts("polly-pattern-matching-based-opts", 261 cl::desc("Perform optimizations based on pattern matching"), 262 cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory)); 263 264 static cl::opt<bool> 
OptimizedScops( 265 "polly-optimized-scops", 266 cl::desc("Polly - Dump polyhedral description of Scops optimized with " 267 "the isl scheduling optimizer and the set of post-scheduling " 268 "transformations is applied on the schedule tree"), 269 cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); 270 271 STATISTIC(ScopsProcessed, "Number of scops processed"); 272 STATISTIC(ScopsRescheduled, "Number of scops rescheduled"); 273 STATISTIC(ScopsOptimized, "Number of scops optimized"); 274 275 STATISTIC(NumAffineLoopsOptimized, "Number of affine loops optimized"); 276 STATISTIC(NumBoxedLoopsOptimized, "Number of boxed loops optimized"); 277 278 #define THREE_STATISTICS(VARNAME, DESC) \ 279 static Statistic VARNAME[3] = { \ 280 {DEBUG_TYPE, #VARNAME "0", DESC " (original)"}, \ 281 {DEBUG_TYPE, #VARNAME "1", DESC " (after scheduler)"}, \ 282 {DEBUG_TYPE, #VARNAME "2", DESC " (after optimizer)"}} 283 284 THREE_STATISTICS(NumBands, "Number of bands"); 285 THREE_STATISTICS(NumBandMembers, "Number of band members"); 286 THREE_STATISTICS(NumCoincident, "Number of coincident band members"); 287 THREE_STATISTICS(NumPermutable, "Number of permutable bands"); 288 THREE_STATISTICS(NumFilters, "Number of filter nodes"); 289 THREE_STATISTICS(NumExtension, "Number of extension nodes"); 290 291 STATISTIC(FirstLevelTileOpts, "Number of first level tiling applied"); 292 STATISTIC(SecondLevelTileOpts, "Number of second level tiling applied"); 293 STATISTIC(RegisterTileOpts, "Number of register tiling applied"); 294 STATISTIC(PrevectOpts, "Number of strip-mining for prevectorization applied"); 295 STATISTIC(MatMulOpts, 296 "Number of matrix multiplication patterns detected and optimized"); 297 298 /// Create an isl::union_set, which describes the isolate option based on 299 /// IsolateDomain. 300 /// 301 /// @param IsolateDomain An isl::set whose @p OutDimsNum last dimensions should 302 /// belong to the current band node. 
303 /// @param OutDimsNum A number of dimensions that should belong to 304 /// the current band node. 305 static isl::union_set getIsolateOptions(isl::set IsolateDomain, 306 unsigned OutDimsNum) { 307 unsigned Dims = IsolateDomain.dim(isl::dim::set); 308 assert(OutDimsNum <= Dims && 309 "The isl::set IsolateDomain is used to describe the range of schedule " 310 "dimensions values, which should be isolated. Consequently, the " 311 "number of its dimensions should be greater than or equal to the " 312 "number of the schedule dimensions."); 313 isl::map IsolateRelation = isl::map::from_domain(IsolateDomain); 314 IsolateRelation = IsolateRelation.move_dims(isl::dim::out, 0, isl::dim::in, 315 Dims - OutDimsNum, OutDimsNum); 316 isl::set IsolateOption = IsolateRelation.wrap(); 317 isl::id Id = isl::id::alloc(IsolateOption.get_ctx(), "isolate", nullptr); 318 IsolateOption = IsolateOption.set_tuple_id(Id); 319 return isl::union_set(IsolateOption); 320 } 321 322 namespace { 323 /// Create an isl::union_set, which describes the specified option for the 324 /// dimension of the current node. 325 /// 326 /// @param Ctx An isl::ctx, which is used to create the isl::union_set. 327 /// @param Option The name of the option. 328 isl::union_set getDimOptions(isl::ctx Ctx, const char *Option) { 329 isl::space Space(Ctx, 0, 1); 330 auto DimOption = isl::set::universe(Space); 331 auto Id = isl::id::alloc(Ctx, Option, nullptr); 332 DimOption = DimOption.set_tuple_id(Id); 333 return isl::union_set(DimOption); 334 } 335 } // namespace 336 337 /// Create an isl::union_set, which describes the option of the form 338 /// [isolate[] -> unroll[x]]. 339 /// 340 /// @param Ctx An isl::ctx, which is used to create the isl::union_set. 
static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
  // A map with a zero-dimensional "isolate" domain and a one-dimensional
  // "unroll" range; wrapping it yields the option [isolate[] -> unroll[x]].
  isl::space Space = isl::space(Ctx, 0, 0, 1);
  isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
  isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
  isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
  UnrollIsolatedSetOption =
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
  UnrollIsolatedSetOption =
      UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
  return UnrollIsolatedSetOption.wrap();
}

/// Make the last dimension of Set to take values from 0 to VectorWidth - 1.
///
/// @param Set         A set, which should be modified.
/// @param VectorWidth A parameter, which determines the constraint.
static isl::set addExtentConstraints(isl::set Set, int VectorWidth) {
  unsigned Dims = Set.dim(isl::dim::set);
  isl::space Space = Set.get_space();
  isl::local_space LocalSpace = isl::local_space(Space);
  // Lower bound on the last dimension d: d >= 0.
  isl::constraint ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
  ExtConstr = ExtConstr.set_constant_si(0);
  ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, 1);
  Set = Set.add_constraint(ExtConstr);
  // Upper bound: VectorWidth - 1 - d >= 0, i.e. d <= VectorWidth - 1.
  ExtConstr = isl::constraint::alloc_inequality(LocalSpace);
  ExtConstr = ExtConstr.set_constant_si(VectorWidth - 1);
  ExtConstr = ExtConstr.set_coefficient_si(isl::dim::set, Dims - 1, -1);
  return Set.add_constraint(ExtConstr);
}

/// Build the set of outer-dimension prefixes for which a full extent
/// [0, VectorWidth - 1] of the innermost dimension lies inside
/// @p ScheduleRange (i.e. the prefixes of full, non-partial tiles).
isl::set getPartialTilePrefixes(isl::set ScheduleRange, int VectorWidth) {
  unsigned Dims = ScheduleRange.dim(isl::dim::set);
  // Drop all constraints on the innermost dimension ...
  isl::set LoopPrefixes =
      ScheduleRange.drop_constraints_involving_dims(isl::dim::set, Dims - 1, 1);
  // ... and re-constrain it to the full extent [0, VectorWidth - 1].
  auto ExtentPrefixes = addExtentConstraints(LoopPrefixes, VectorWidth);
  // Prefixes whose full extent is not entirely contained in the original
  // schedule range belong to partial tiles and are removed below.
  isl::set BadPrefixes = ExtentPrefixes.subtract(ScheduleRange);
  BadPrefixes = BadPrefixes.project_out(isl::dim::set, Dims - 1, 1);
  LoopPrefixes = LoopPrefixes.project_out(isl::dim::set, Dims - 1, 1);
  return LoopPrefixes.subtract(BadPrefixes);
}

isl::schedule_node
ScheduleTreeOptimizer::isolateFullPartialTiles(isl::schedule_node Node,
                                               int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Descend two levels below the band node to obtain the prefix schedule
  // relation at that position (undone by parent().parent() below).
  Node = Node.child(0).child(0);
  isl::union_map SchedRelUMap = Node.get_prefix_schedule_relation();
  isl::union_set ScheduleRangeUSet = SchedRelUMap.range();
  isl::set ScheduleRange{ScheduleRangeUSet};
  isl::set IsolateDomain = getPartialTilePrefixes(ScheduleRange, VectorWidth);
  auto AtomicOption = getDimOptions(IsolateDomain.get_ctx(), "atomic");
  isl::union_set IsolateOption = getIsolateOptions(IsolateDomain, 1);
  Node = Node.parent().parent();
  // Isolate the full tiles; everything else is generated atomically.
  isl::union_set Options = IsolateOption.unite(AtomicOption);
  Node = Node.band_set_ast_build_options(Options);
  return Node;
}

isl::schedule_node ScheduleTreeOptimizer::prevectSchedBand(
    isl::schedule_node Node, unsigned DimToVectorize, int VectorWidth) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto ScheduleDimensions = Space.dim(isl::dim::set);
  assert(DimToVectorize < ScheduleDimensions);

  // Split the band so that DimToVectorize forms a band of its own: first
  // split off the preceding dimensions ...
  if (DimToVectorize > 0) {
    Node = isl::manage(
        isl_schedule_node_band_split(Node.release(), DimToVectorize));
    Node = Node.child(0);
  }
  // ... then the following ones.
  if (DimToVectorize < ScheduleDimensions - 1)
    Node = isl::manage(isl_schedule_node_band_split(Node.release(), 1));
  // Strip-mine the chosen dimension by VectorWidth.
  Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Sizes = isl::multi_val::zero(Space);
  Sizes = Sizes.set_val(0, isl::val(Node.get_ctx(), VectorWidth));
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = isolateFullPartialTiles(Node, VectorWidth);
  Node = Node.child(0);
  // Make sure the "trivially vectorizable loop" is not unrolled. Otherwise,
  // we will have troubles to match it in the backend.
  Node = Node.band_set_ast_build_options(
      isl::union_set(Node.get_ctx(), "{ unroll[x]: 1 = 0 }"));
  // Sink the point band to the innermost position and mark it "SIMD" so the
  // code generator can recognize it.
  Node = isl::manage(isl_schedule_node_band_sink(Node.release()));
  Node = Node.child(0);
  if (isl_schedule_node_get_type(Node.get()) == isl_schedule_node_leaf)
    Node = Node.parent();
  auto LoopMarker = isl::id::alloc(Node.get_ctx(), "SIMD", nullptr);
  PrevectOpts++;
  return Node.insert_mark(LoopMarker);
}

isl::schedule_node ScheduleTreeOptimizer::tileNode(isl::schedule_node Node,
                                                   const char *Identifier,
                                                   ArrayRef<int> TileSizes,
                                                   int DefaultTileSize) {
  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);
  auto Sizes = isl::multi_val::zero(Space);
  std::string IdentifierString(Identifier);
  // Per-dimension tile size from TileSizes; DefaultTileSize fills the rest.
  for (unsigned i = 0; i < Dims; i++) {
    auto tileSize = i < TileSizes.size() ? TileSizes[i] : DefaultTileSize;
    Sizes = Sizes.set_val(i, isl::val(Node.get_ctx(), tileSize));
  }
  // Surround the tiled bands with "<Identifier> - Tiles" / "- Points" marks
  // so both loop levels are identifiable later.
  auto TileLoopMarkerStr = IdentifierString + " - Tiles";
  auto TileLoopMarker =
      isl::id::alloc(Node.get_ctx(), TileLoopMarkerStr, nullptr);
  Node = Node.insert_mark(TileLoopMarker);
  Node = Node.child(0);
  Node =
      isl::manage(isl_schedule_node_band_tile(Node.release(), Sizes.release()));
  Node = Node.child(0);
  auto PointLoopMarkerStr = IdentifierString + " - Points";
  auto PointLoopMarker =
      isl::id::alloc(Node.get_ctx(), PointLoopMarkerStr, nullptr);
  Node = Node.insert_mark(PointLoopMarker);
  // Return the point band below the inserted mark.
  return Node.child(0);
}

isl::schedule_node ScheduleTreeOptimizer::applyRegisterTiling(
    isl::schedule_node Node, ArrayRef<int> TileSizes, int DefaultTileSize) {
  Node = tileNode(Node, "Register tiling", TileSizes, DefaultTileSize);
  auto Ctx = Node.get_ctx();
  // Request unrolling of the register-tiled point loops.
  return Node.band_set_ast_build_options(isl::union_set(Ctx, "{unroll[x]}"));
}

/// Check whether the band node has a simple subtree: either a single leaf,
/// or a sequence whose children are all filter-over-leaf nodes.
static bool isSimpleInnermostBand(const isl::schedule_node &Node) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  assert(isl_schedule_node_n_children(Node.get()) == 1);

  auto ChildType = isl_schedule_node_get_type(Node.child(0).get());

  if (ChildType == isl_schedule_node_leaf)
    return true;

  if (ChildType != isl_schedule_node_sequence)
    return false;

  auto Sequence = Node.child(0);

  for (int c = 0, nc = isl_schedule_node_n_children(Sequence.get()); c < nc;
       ++c) {
    auto Child = Sequence.child(c);
    if (isl_schedule_node_get_type(Child.get()) != isl_schedule_node_filter)
      return false;
    if (isl_schedule_node_get_type(Child.child(0).get()) !=
        isl_schedule_node_leaf)
      return false;
  }
  return true;
}

bool ScheduleTreeOptimizer::isTileableBandNode(isl::schedule_node Node) {
  if (isl_schedule_node_get_type(Node.get()) != isl_schedule_node_band)
    return false;

  if (isl_schedule_node_n_children(Node.get()) != 1)
    return false;

  // Only permutable bands may be tiled.
  if (!isl_schedule_node_band_get_permutable(Node.get()))
    return false;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  // A single-dimensional band is not worth tiling.
  if (Dims <= 1)
    return false;

  return isSimpleInnermostBand(Node);
}

__isl_give isl::schedule_node
ScheduleTreeOptimizer::standardBandOpts(isl::schedule_node Node, void *User) {
  if (FirstLevelTiling) {
    Node = tileNode(Node, "1st level tiling", FirstLevelTileSizes,
                    FirstLevelDefaultTileSize);
    FirstLevelTileOpts++;
  }

  if (SecondLevelTiling) {
    Node = tileNode(Node, "2nd level tiling", SecondLevelTileSizes,
                    SecondLevelDefaultTileSize);
    SecondLevelTileOpts++;
  }

  if (RegisterTiling) {
    Node =
        applyRegisterTiling(Node, RegisterTileSizes, RegisterDefaultTileSize);
    RegisterTileOpts++;
  }

  if (PollyVectorizerChoice == VECTORIZER_NONE)
    return Node;

  auto Space = isl::manage(isl_schedule_node_band_get_space(Node.get()));
  auto Dims = Space.dim(isl::dim::set);

  // Strip-mine the innermost coincident band member (searched from the
  // innermost dimension outwards) to enable inner-loop vectorization.
  for (int i = Dims - 1; i >= 0; i--)
    if (Node.band_member_get_coincident(i)) {
      Node = prevectSchedBand(Node, i, PrevectorWidth);
      break;
    }

  return Node;
}

/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map     The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos  The first dimension.
/// @param SrcPos  The second dimension.
/// @return The modified map.
isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos,
                           unsigned SrcPos) {
  assert(DstPos < Map.dim(DimType) && SrcPos < Map.dim(DimType));
  if (DstPos == SrcPos)
    return Map;
  // Preserve the tuple ids of both tuples across the moves (restored below).
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  // Swap the two dimensions by parking them temporarily in the opposite
  // tuple and moving them back in exchanged order.
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  if (DimId)
    Map = Map.set_tuple_id(DimType, DimId);
  if (FreeDimId)
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}

/// Check the form of the access relation.
///
/// Check that the access relation @p AccMap has the form M[i][j], where i
/// is a @p FirstPos and j is a @p SecondPos.
///
/// @param AccMap The access relation to be checked.
589 /// @param FirstPos The index of the input dimension that is mapped to 590 /// the first output dimension. 591 /// @param SecondPos The index of the input dimension that is mapped to the 592 /// second output dimension. 593 /// @return True in case @p AccMap has the expected form and false, 594 /// otherwise. 595 static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos, 596 int &SecondPos) { 597 isl::space Space = AccMap.get_space(); 598 isl::map Universe = isl::map::universe(Space); 599 600 if (Space.dim(isl::dim::out) != 2) 601 return false; 602 603 // MatMul has the form: 604 // for (i = 0; i < N; i++) 605 // for (j = 0; j < M; j++) 606 // for (k = 0; k < P; k++) 607 // C[i, j] += A[i, k] * B[k, j] 608 // 609 // Permutation of three outer loops: 3! = 6 possibilities. 610 int FirstDims[] = {0, 0, 1, 1, 2, 2}; 611 int SecondDims[] = {1, 2, 2, 0, 0, 1}; 612 for (int i = 0; i < 6; i += 1) { 613 auto PossibleMatMul = 614 Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0) 615 .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1); 616 617 AccMap = AccMap.intersect_domain(Domain); 618 PossibleMatMul = PossibleMatMul.intersect_domain(Domain); 619 620 // If AccMap spans entire domain (Non-partial write), 621 // compute FirstPos and SecondPos. 622 // If AccMap != PossibleMatMul here (the two maps have been gisted at 623 // this point), it means that the writes are not complete, or in other 624 // words, it is a Partial write and Partial writes must be rejected. 625 if (AccMap.is_equal(PossibleMatMul)) { 626 if (FirstPos != -1 && FirstPos != FirstDims[i]) 627 continue; 628 FirstPos = FirstDims[i]; 629 if (SecondPos != -1 && SecondPos != SecondDims[i]) 630 continue; 631 SecondPos = SecondDims[i]; 632 return true; 633 } 634 } 635 636 return false; 637 } 638 639 /// Does the memory access represent a non-scalar operand of the matrix 640 /// multiplication. 
///
/// Check that the memory access @p MemAccess is the read access to a non-scalar
/// operand of the matrix multiplication or its result.
///
/// @param MemAccess The memory access to be checked.
/// @param MMI       Parameters of the matrix multiplication operands.
/// @return True in case the memory access represents the read access
///         to a non-scalar operand of the matrix multiplication and
///         false, otherwise.
static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
                                        MatMulInfoTy &MMI) {
  // Only array reads can be matmul operands.
  if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
    return false;
  auto AccMap = MemAccess->getLatestAccessRelation();
  isl::set StmtDomain = MemAccess->getStatement()->getDomain();
  // The checks below are ordered: each isMatMulOperandAcc call may pin
  // MMI.i/MMI.j/MMI.k, which constrains the subsequent checks.
  // Candidate for the read of C (indexed by i and j).
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
    MMI.ReadFromC = MemAccess;
    return true;
  }
  // Candidate for operand A (indexed by i and k).
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
    MMI.A = MemAccess;
    return true;
  }
  // Candidate for operand B (indexed by k and j).
  if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
    MMI.B = MemAccess;
    return true;
  }
  return false;
}

/// Check accesses to operands of the matrix multiplication.
///
/// Check that accesses of the SCoP statement, which corresponds to
/// the partial schedule @p PartialSchedule, are scalar in terms of loops
/// containing the matrix multiplication, in case they do not represent
/// accesses to the non-scalar operands of the matrix multiplication or
/// its result.
///
/// @param PartialSchedule The partial schedule of the SCoP statement.
/// @param MMI             Parameters of the matrix multiplication operands.
/// @return True in case the corresponding SCoP statement
///         represents matrix multiplication and false,
///         otherwise.
684 static bool containsOnlyMatrMultAcc(isl::map PartialSchedule, 685 MatMulInfoTy &MMI) { 686 auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in); 687 auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user()); 688 unsigned OutDimNum = PartialSchedule.dim(isl::dim::out); 689 assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest " 690 "and, consequently, the corresponding scheduling " 691 "functions have at least three dimensions."); 692 auto MapI = 693 permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1); 694 auto MapJ = 695 permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1); 696 auto MapK = 697 permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1); 698 699 auto Accesses = getAccessesInOrder(*Stmt); 700 for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) { 701 auto *MemAccessPtr = *MemA; 702 if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC && 703 !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) && 704 !(MemAccessPtr->isStrideZero(MapI)) && 705 MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK)) 706 return false; 707 } 708 return true; 709 } 710 711 /// Check for dependencies corresponding to the matrix multiplication. 712 /// 713 /// Check that there is only true dependence of the form 714 /// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement 715 /// represented by @p Schedule and k is @p Pos. Such a dependence corresponds 716 /// to the dependency produced by the matrix multiplication. 717 /// 718 /// @param Schedule The schedule of the SCoP statement. 719 /// @param D The SCoP dependencies. 720 /// @param Pos The parameter to describe an acceptable true dependence. 721 /// In case it has a negative value, try to determine its 722 /// acceptable value. 723 /// @return True in case dependencies correspond to the matrix multiplication 724 /// and false, otherwise. 
725 static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D, 726 int &Pos) { 727 isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW); 728 isl::union_map Red = D->getDependences(Dependences::TYPE_RED); 729 if (Red) 730 Dep = Dep.unite(Red); 731 auto DomainSpace = Schedule.get_space().domain(); 732 auto Space = DomainSpace.map_from_domain_and_range(DomainSpace); 733 auto Deltas = Dep.extract_map(Space).deltas(); 734 int DeltasDimNum = Deltas.dim(isl::dim::set); 735 for (int i = 0; i < DeltasDimNum; i++) { 736 auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i); 737 Pos = Pos < 0 && Val.is_one() ? i : Pos; 738 if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one()))) 739 return false; 740 } 741 if (DeltasDimNum == 0 || Pos < 0) 742 return false; 743 return true; 744 } 745 746 /// Check if the SCoP statement could probably be optimized with analytical 747 /// modeling. 748 /// 749 /// containsMatrMult tries to determine whether the following conditions 750 /// are true: 751 /// 1. The last memory access modeling an array, MA1, represents writing to 752 /// memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or 753 /// S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement 754 /// under consideration. 755 /// 2. There is only one loop-carried true dependency, and it has the 756 /// form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no 757 /// loop-carried or anti dependencies. 758 /// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent 759 /// reading from memory and have the form S(..., i3, ...) -> M(i1, i3), 760 /// S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively, 761 /// and all memory accesses of the SCoP that are different from MA1, MA2, 762 /// MA3, and MA4 have stride 0, if the innermost loop is exchanged with any 763 /// of loops i1, i2 and i3. 
764 /// 765 /// @param PartialSchedule The PartialSchedule that contains a SCoP statement 766 /// to check. 767 /// @D The SCoP dependencies. 768 /// @MMI Parameters of the matrix multiplication operands. 769 static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D, 770 MatMulInfoTy &MMI) { 771 auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in); 772 auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user()); 773 if (Stmt->size() <= 1) 774 return false; 775 776 auto Accesses = getAccessesInOrder(*Stmt); 777 for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) { 778 auto *MemAccessPtr = *MemA; 779 if (!MemAccessPtr->isLatestArrayKind()) 780 continue; 781 if (!MemAccessPtr->isWrite()) 782 return false; 783 auto AccMap = MemAccessPtr->getLatestAccessRelation(); 784 if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j)) 785 return false; 786 MMI.WriteToC = MemAccessPtr; 787 break; 788 } 789 790 if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k)) 791 return false; 792 793 if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI)) 794 return false; 795 796 if (!MMI.A || !MMI.B || !MMI.ReadFromC) 797 return false; 798 return true; 799 } 800 801 /// Permute two dimensions of the band node. 802 /// 803 /// Permute FirstDim and SecondDim dimensions of the Node. 804 /// 805 /// @param Node The band node to be modified. 806 /// @param FirstDim The first dimension to be permuted. 807 /// @param SecondDim The second dimension to be permuted. 
808 static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node, 809 unsigned FirstDim, 810 unsigned SecondDim) { 811 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band && 812 (unsigned)isl_schedule_node_band_n_member(Node.get()) > 813 std::max(FirstDim, SecondDim)); 814 auto PartialSchedule = 815 isl::manage(isl_schedule_node_band_get_partial_schedule(Node.get())); 816 auto PartialScheduleFirstDim = PartialSchedule.get_union_pw_aff(FirstDim); 817 auto PartialScheduleSecondDim = PartialSchedule.get_union_pw_aff(SecondDim); 818 PartialSchedule = 819 PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim); 820 PartialSchedule = 821 PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim); 822 Node = isl::manage(isl_schedule_node_delete(Node.release())); 823 return Node.insert_partial_schedule(PartialSchedule); 824 } 825 826 isl::schedule_node ScheduleTreeOptimizer::createMicroKernel( 827 isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) { 828 Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr}, 829 1); 830 Node = Node.parent().parent(); 831 return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0); 832 } 833 834 isl::schedule_node ScheduleTreeOptimizer::createMacroKernel( 835 isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) { 836 assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band); 837 if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 && 838 MacroKernelParams.Kc == 1) 839 return Node; 840 int DimOutNum = isl_schedule_node_band_n_member(Node.get()); 841 std::vector<int> TileSizes(DimOutNum, 1); 842 TileSizes[DimOutNum - 3] = MacroKernelParams.Mc; 843 TileSizes[DimOutNum - 2] = MacroKernelParams.Nc; 844 TileSizes[DimOutNum - 1] = MacroKernelParams.Kc; 845 Node = tileNode(Node, "1st level tiling", TileSizes, 1); 846 Node = Node.parent().parent(); 847 Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1); 
848 Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1); 849 850 // Mark the outermost loop as parallelizable. 851 Node = Node.band_member_set_coincident(0, true); 852 853 return Node.child(0).child(0); 854 } 855 856 /// Get the size of the widest type of the matrix multiplication operands 857 /// in bytes, including alignment padding. 858 /// 859 /// @param MMI Parameters of the matrix multiplication operands. 860 /// @return The size of the widest type of the matrix multiplication operands 861 /// in bytes, including alignment padding. 862 static uint64_t getMatMulAlignTypeSize(MatMulInfoTy MMI) { 863 auto *S = MMI.A->getStatement()->getParent(); 864 auto &DL = S->getFunction().getParent()->getDataLayout(); 865 auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType()); 866 auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType()); 867 auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType()); 868 return std::max({ElementSizeA, ElementSizeB, ElementSizeC}); 869 } 870 871 /// Get the size of the widest type of the matrix multiplication operands 872 /// in bits. 873 /// 874 /// @param MMI Parameters of the matrix multiplication operands. 875 /// @return The size of the widest type of the matrix multiplication operands 876 /// in bits. 877 static uint64_t getMatMulTypeSize(MatMulInfoTy MMI) { 878 auto *S = MMI.A->getStatement()->getParent(); 879 auto &DL = S->getFunction().getParent()->getDataLayout(); 880 auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType()); 881 auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType()); 882 auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType()); 883 return std::max({ElementSizeA, ElementSizeB, ElementSizeC}); 884 } 885 886 /// Get parameters of the BLIS micro kernel. 
///
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
/// such that no stalls caused by the combination of latencies and dependencies
/// are introduced during the updates of the resulting matrix of the matrix
/// multiplication. However, they should also be as small as possible to
/// release more registers for entries of multiplied matrices.
///
/// @param TTI Target Transform Info.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MicroKernelParamsTy.
/// @see MicroKernelParamsTy
static struct MicroKernelParamsTy
getMicroKernelParams(const TargetTransformInfo *TTI, MatMulInfoTy MMI) {
  assert(TTI && "The target transform info should be provided.");

  // Nvec - Number of double-precision floating-point numbers that can be hold
  // by a vector register. Use 2 by default.
  long RegisterBitwidth = VectorRegisterBitwidth;

  // -1 is the sentinel meaning the option was not set on the command line;
  // fall back to the vector register width reported by the target.
  if (RegisterBitwidth == -1)
    RegisterBitwidth = TTI->getRegisterBitWidth(true);
  auto ElementSize = getMatMulTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  auto Nvec = RegisterBitwidth / ElementSize;
  if (Nvec == 0)
    Nvec = 2;
  // Nr is rounded up to the next multiple of the vector width Nvec.
  int Nr = ceil(sqrt((double)(Nvec * LatencyVectorFma * ThroughputVectorFma)) /
                Nvec) *
           Nvec;
  // NOTE(review): the division by Nr below is performed in integer
  // arithmetic before the cast to double, so ceil() is applied to an
  // already-truncated quotient — confirm this truncation is intended.
  int Mr = ceil((double)(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr));
  return {Mr, Nr};
}

namespace {
/// Determine parameters of the target cache.
///
/// Fill in any of the file-level cache-parameter options that were left at
/// their -1 sentinel ("not set on the command line"), either from the
/// target's TTI information or, if TTI has no data, from the corresponding
/// *Default* option values.
///
/// @param TTI Target Transform Info.
void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
  auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
  auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
  // L1 data cache size.
  if (FirstCacheLevelSize == -1) {
    if (TTI->getCacheSize(L1DCache).hasValue())
      FirstCacheLevelSize = TTI->getCacheSize(L1DCache).getValue();
    else
      FirstCacheLevelSize = static_cast<int>(FirstCacheLevelDefaultSize);
  }
  // L2 data cache size.
  if (SecondCacheLevelSize == -1) {
    if (TTI->getCacheSize(L2DCache).hasValue())
      SecondCacheLevelSize = TTI->getCacheSize(L2DCache).getValue();
    else
      SecondCacheLevelSize = static_cast<int>(SecondCacheLevelDefaultSize);
  }
  // L1 associativity degree.
  if (FirstCacheLevelAssociativity == -1) {
    if (TTI->getCacheAssociativity(L1DCache).hasValue())
      FirstCacheLevelAssociativity =
          TTI->getCacheAssociativity(L1DCache).getValue();
    else
      FirstCacheLevelAssociativity =
          static_cast<int>(FirstCacheLevelDefaultAssociativity);
  }
  // L2 associativity degree.
  if (SecondCacheLevelAssociativity == -1) {
    if (TTI->getCacheAssociativity(L2DCache).hasValue())
      SecondCacheLevelAssociativity =
          TTI->getCacheAssociativity(L2DCache).getValue();
    else
      SecondCacheLevelAssociativity =
          static_cast<int>(SecondCacheLevelDefaultAssociativity);
  }
}
} // namespace

/// Get parameters of the BLIS macro kernel.
///
/// During the computation of matrix multiplication, blocks of partitioned
/// matrices are mapped to different layers of the memory hierarchy.
/// To optimize data reuse, blocks should be ideally kept in cache between
/// iterations. Since parameters of the macro kernel determine sizes of these
/// blocks, there are upper and lower bounds on these parameters.
///
/// @param TTI Target Transform Info.
/// @param MicroKernelParams Parameters of the micro-kernel
///                          to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MacroKernelParamsTy.
/// @see MacroKernelParamsTy
/// @see MicroKernelParamsTy
static struct MacroKernelParamsTy
getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
                     const MicroKernelParamsTy &MicroKernelParams,
                     MatMulInfoTy MMI) {
  getTargetCacheParameters(TTI);
  // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
  // it requires information about the first two levels of a cache to determine
  // all the parameters of a macro-kernel. It also checks that an associativity
  // degree of a cache level is greater than two. Otherwise, another algorithm
  // for determination of the parameters should be used.
  if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
        FirstCacheLevelSize > 0 && SecondCacheLevelSize > 0 &&
        FirstCacheLevelAssociativity > 2 && SecondCacheLevelAssociativity > 2))
    return {1, 1, 1};
  // The quotient should be greater than zero.
  if (PollyPatternMatchingNcQuotient <= 0)
    return {1, 1, 1};
  // Car - number of L1 cache ways reserved for a block of A.
  int Car = floor(
      (FirstCacheLevelAssociativity - 1) /
      (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));

  // Car can be computed to be zero since it is floor to int.
  // A zero Car would make Kc zero, which in turn makes Cac zero and Mc a
  // division by zero below. On Mac OS, division by 0 does not raise a
  // signal; it silently produces negative tile sizes instead. Prevent this
  // by early returning when Car is zero.
  if (Car == 0)
    return {1, 1, 1};

  auto ElementSize = getMatMulAlignTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  int Kc = (Car * FirstCacheLevelSize) /
           (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
  // Cac - fraction of the L2 ways occupied by a Kc-deep block of A.
  double Cac =
      static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
      SecondCacheLevelSize;
  int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
  int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;

  assert(Mc > 0 && Nc > 0 && Kc > 0 &&
         "Matrix block sizes should be greater than zero");
  return {Mc, Nc, Kc};
}

/// Create an access relation that is specific to
///        the matrix multiplication pattern.
///
/// Create an access relation of the following form:
/// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
/// where I is @p FirstDim, J is @p SecondDim.
///
/// It can be used, for example, to create relations that helps to consequently
/// access elements of operands of a matrix multiplication after creation of
/// the BLIS micro and macro kernels.
///
/// @see ScheduleTreeOptimizer::createMicroKernel
/// @see ScheduleTreeOptimizer::createMacroKernel
///
/// Subsequently, the described access relation is applied to the range of
/// @p MapOldIndVar, that is used to map original induction variables to
/// the ones, which are produced by schedule transformations. It helps to
/// define relations using a new space and, at the same time, keep them
/// in the original one.
///
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param FirstDim, SecondDim The input dimensions that are used to define
///        the specified access relation.
/// @return The specified access relation.
isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
                         unsigned SecondDim) {
  // A map space with 0 parameters, 9 input and 3 output dimensions:
  // [O0, ..., O8] -> [OI, O5, OJ] (see the description above).
  auto AccessRelSpace = isl::space(MapOldIndVar.get_ctx(), 0, 9, 3);
  auto AccessRel = isl::map::universe(AccessRelSpace);
  AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
  AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
  AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
  return MapOldIndVar.apply_range(AccessRel);
}

/// Graft an extension node built from @p ExtensionMap before @p Node.
///
/// @param Node         The node before which the extension is grafted.
/// @param ExtensionMap The extension relation of the new node.
/// @return The schedule node after grafting.
isl::schedule_node createExtensionNode(isl::schedule_node Node,
                                       isl::map ExtensionMap) {
  auto Extension = isl::union_map(ExtensionMap);
  auto NewNode = isl::schedule_node::from_extension(Extension);
  return Node.graft_before(NewNode);
}

/// Apply the packing transformation.
///
/// The packing transformation can be described as a data-layout
/// transformation that requires to introduce a new array, copy data
/// to the array, and change memory access locations to reference the array.
/// It can be used to ensure that elements of the new array are read in-stride
/// access, aligned to cache lines boundaries, and preloaded into certain cache
/// levels.
///
/// As an example let us consider the packing of the array A that would help
/// to read its elements with in-stride access. An access to the array A
/// is represented by an access relation that has the form
/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
/// k mod Kc, j mod Nr, i mod Mr].
///
/// To ensure that elements of the array A are read in-stride access, we add
/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
/// Scop::createScopArrayInfo, change the access relation
/// S[i, j, k] -> A[i, k] to
/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
/// the copy statement created by Scop::addScopStmt.
///
/// @param Node The schedule node to be optimized.
/// @param MapOldIndVar The relation, which maps original induction variables
///                     to the ones, which are produced by schedule
///                     transformations.
/// @param MicroParams, MacroParams Parameters of the BLIS kernel
///                                 to be taken into account.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static isl::schedule_node
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
                                 MicroKernelParamsTy MicroParams,
                                 MacroKernelParamsTy MacroParams,
                                 MatMulInfoTy &MMI) {
  auto InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());

  // Create a copy statement that corresponds to the memory access to the
  // matrix B, the second operand of the matrix multiplication.
  // NOTE(review): the six parent() calls rely on the exact tree shape
  // produced by createMacroKernel/createMicroKernel — keep in sync.
  Node = Node.parent().parent().parent().parent().parent().parent();
  Node = isl::manage(isl_schedule_node_band_split(Node.release(), 2)).child(0);
  auto AccRel = getMatMulAccRel(MapOldIndVar, 3, 7);
  unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Nr;
  // Packed_B[Nc/Nr][Kc][Nr].
  auto *SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.B->getElementType(), "Packed_B",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  auto OldAcc = MMI.B->getLatestAccessRelation();
  MMI.B->setNewAccessRelation(AccRel);
  auto ExtMap = MapOldIndVar.project_out(isl::dim::out, 2,
                                         MapOldIndVar.dim(isl::dim::out) - 2);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
  auto Domain = Stmt->getDomain();

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  auto DomainId = Domain.get_tuple_id();
  auto *NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.B->getLatestAccessRelation(), Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);

  // Create a copy statement that corresponds to the memory access
  // to the matrix A, the first operand of the matrix multiplication.
  Node = Node.child(0);
  AccRel = getMatMulAccRel(MapOldIndVar, 4, 6);
  FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  ThirdDimSize = MicroParams.Mr;
  // Packed_A[Mc/Mr][Kc][Mr].
  SAI = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});
  AccRel = AccRel.set_tuple_id(isl::dim::out, SAI->getBasePtrId());
  OldAcc = MMI.A->getLatestAccessRelation();
  MMI.A->setNewAccessRelation(AccRel);
  ExtMap = MapOldIndVar.project_out(isl::dim::out, 3,
                                    MapOldIndVar.dim(isl::dim::out) - 3);
  ExtMap = ExtMap.reverse();
  ExtMap = ExtMap.fix_si(isl::dim::out, MMI.j, 0);
  NewStmt = Stmt->getParent()->addScopStmt(
      OldAcc, MMI.A->getLatestAccessRelation(), Domain);

  // Restrict the domains of the copy statements to only execute when also its
  // originating statement is executed.
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, DomainId);
  ExtMap = ExtMap.intersect_range(Domain);
  ExtMap = ExtMap.set_tuple_id(isl::dim::out, NewStmt->getDomainId());
  Node = createExtensionNode(Node, ExtMap);
  // Descend back to the position expected by the caller.
  return Node.child(0).child(0).child(0).child(0).child(0);
}

/// Get a relation mapping induction variables produced by schedule
/// transformations to the original ones.
///
/// @param Node The schedule node produced as the result of creation
///        of the BLIS kernels.
/// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
///                                             to be taken into account.
/// @return The relation mapping original induction variables to the ones
///         produced by schedule transformation.
1168 /// @see ScheduleTreeOptimizer::createMicroKernel 1169 /// @see ScheduleTreeOptimizer::createMacroKernel 1170 /// @see getMacroKernelParams 1171 isl::map 1172 getInductionVariablesSubstitution(isl::schedule_node Node, 1173 MicroKernelParamsTy MicroKernelParams, 1174 MacroKernelParamsTy MacroKernelParams) { 1175 auto Child = Node.child(0); 1176 auto UnMapOldIndVar = Child.get_prefix_schedule_union_map(); 1177 auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar); 1178 if (MapOldIndVar.dim(isl::dim::out) > 9) 1179 return MapOldIndVar.project_out(isl::dim::out, 0, 1180 MapOldIndVar.dim(isl::dim::out) - 9); 1181 return MapOldIndVar; 1182 } 1183 1184 /// Isolate a set of partial tile prefixes and unroll the isolated part. 1185 /// 1186 /// The set should ensure that it contains only partial tile prefixes that have 1187 /// exactly Mr x Nr iterations of the two innermost loops produced by 1188 /// the optimization of the matrix multiplication. Mr and Nr are parameters of 1189 /// the micro-kernel. 1190 /// 1191 /// In case of parametric bounds, this helps to auto-vectorize the unrolled 1192 /// innermost loops, using the SLP vectorizer. 1193 /// 1194 /// @param Node The schedule node to be modified. 1195 /// @param MicroKernelParams Parameters of the micro-kernel 1196 /// to be taken into account. 1197 /// @return The modified isl_schedule_node. 
static isl::schedule_node
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
                                 struct MicroKernelParamsTy MicroKernelParams) {
  isl::schedule_node Child = Node.get_child(0);
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
  unsigned Dims = Prefix.dim(isl::dim::set);
  // Drop the innermost dimension and compute the prefixes that correspond to
  // full Nr x Mr tiles.
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);

  // Request unrolling of the isolated (full-tile) part on this band.
  isl::union_set IsolateOption =
      getIsolateOptions(Prefix.add_dims(isl::dim::set, 3), 3);
  isl::ctx Ctx = Node.get_ctx();
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
  Node = Node.band_set_ast_build_options(Options);
  // Request separation of full and partial tiles on the band three levels up.
  // NOTE(review): the parent()/child() counts rely on the exact tree shape
  // built by the matmul optimization — keep in sync with the kernel creation.
  Node = Node.parent().parent().parent();
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
  Node = Node.band_set_ast_build_options(Options);
  Node = Node.child(0).child(0).child(0);
  return Node;
}

/// Mark @p BasePtr with "Inter iteration alias-free" mark node.
///
/// @param Node The child of the mark node to be inserted.
/// @param BasePtr The pointer to be marked.
/// @return The modified isl_schedule_node.
static isl::schedule_node markInterIterationAliasFree(isl::schedule_node Node,
                                                      Value *BasePtr) {
  // Without a known base pointer there is nothing to mark.
  if (!BasePtr)
    return Node;

  auto Id =
      isl::id::alloc(Node.get_ctx(), "Inter iteration alias-free", BasePtr);
  return Node.insert_mark(Id).child(0);
}

/// Insert "Loop Vectorizer Disabled" mark node.
///
/// @param Node The child of the mark node to be inserted.
/// @return The modified isl_schedule_node.
static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
  auto Id = isl::id::alloc(Node.get_ctx(), "Loop Vectorizer Disabled", nullptr);
  return Node.insert_mark(Id).child(0);
}

/// Restore the initial ordering of dimensions of the band node
///
/// In case the band node represents all the dimensions of the iteration
/// domain, recreate the band node to restore the initial ordering of the
/// dimensions.
///
/// @param Node The band node to be modified.
/// @return The modified schedule node.
static isl::schedule_node
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
  assert(isl_schedule_node_get_type(Node.get()) == isl_schedule_node_band);
  // Only rebuild bands directly above a leaf.
  if (isl_schedule_node_get_type(Node.child(0).get()) != isl_schedule_node_leaf)
    return Node;
  auto Domain = Node.get_universe_domain();
  assert(isl_union_set_n_set(Domain.get()) == 1);
  // The band must be outermost and cover all domain dimensions.
  if (Node.get_schedule_depth() != 0 ||
      (static_cast<isl_size>(isl::set(Domain).dim(isl::dim::set)) !=
       isl_schedule_node_band_n_member(Node.get())))
    return Node;
  // Replace the band with one built from the identity schedule over the
  // domain, which uses the original dimension order.
  Node = isl::manage(isl_schedule_node_delete(Node.copy()));
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  PartialScheduleMultiPwAff =
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
}

isl::schedule_node
ScheduleTreeOptimizer::optimizeMatMulPattern(isl::schedule_node Node,
                                             const TargetTransformInfo *TTI,
                                             MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  Node = markInterIterationAliasFree(
      Node, MMI.WriteToC->getLatestScopArrayInfo()->getBasePtr());
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(Node);
  // Move the i, j and k dimensions into the three innermost positions.
  // After each permutation, track where the remaining dimensions ended up:
  // if j (or k) occupied the target slot, it now lives where i (or j) was.
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  // Without real cache blocking, skip the data-layout transformation.
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
                                                        MacroKernelParams);
  if (!MapOldIndVar)
    return Node;
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}

bool ScheduleTreeOptimizer::isMatrMultPattern(isl::schedule_node Node,
                                              const Dependences *D,
                                              MatMulInfoTy &MMI) {
  auto PartialSchedule = isl::manage(
      isl_schedule_node_band_get_partial_schedule_union_map(Node.get()));
  // The band must sit directly above a leaf, be outermost, have at least
  // three members and a single-map partial schedule.
  Node = Node.child(0);
  auto LeafType = isl_schedule_node_get_type(Node.get());
  Node = Node.parent();
  if (LeafType != isl_schedule_node_leaf ||
      isl_schedule_node_band_n_member(Node.get()) < 3 ||
      Node.get_schedule_depth() != 0 ||
      isl_union_map_n_map(PartialSchedule.get()) != 1)
    return false;
  auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
  if (containsMatrMult(NewPartialSchedule, D, MMI))
    return true;
  return false;
}

__isl_give isl_schedule_node *
ScheduleTreeOptimizer::optimizeBand(__isl_take isl_schedule_node *Node,
                                    void *User) {
  if (!isTileableBandNode(isl::manage_copy(Node)))
    return Node;

  const OptimizerAdditionalInfoTy *OAI =
      static_cast<const OptimizerAdditionalInfoTy *>(User);

  // Prefer the pattern-based matmul optimization; fall back to the standard
  // band optimizations otherwise.
  MatMulInfoTy MMI;
  if (PMBasedOpts && User &&
      isMatrMultPattern(isl::manage_copy(Node), OAI->D, MMI)) {
    LLVM_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
    MatMulOpts++;
    return optimizeMatMulPattern(isl::manage(Node), OAI->TTI, MMI).release();
  }

  return standardBandOpts(isl::manage(Node), User).release();
}

isl::schedule
ScheduleTreeOptimizer::optimizeSchedule(isl::schedule Schedule,
                                        const OptimizerAdditionalInfoTy *OAI) {
  auto Root = Schedule.get_root();
  Root = optimizeScheduleNode(Root, OAI);
  return Root.get_schedule();
}

isl::schedule_node ScheduleTreeOptimizer::optimizeScheduleNode(
    isl::schedule_node Node, const OptimizerAdditionalInfoTy *OAI) {
  // Apply optimizeBand to every node, bottom-up.
  Node = isl::manage(isl_schedule_node_map_descendant_bottom_up(
      Node.release(), optimizeBand,
      const_cast<void *>(static_cast<const void *>(OAI))));
  return Node;
}

bool ScheduleTreeOptimizer::isProfitableSchedule(Scop &S,
                                                 isl::schedule NewSchedule) {
  // To understand if the schedule has been optimized we check if the schedule
  // has changed at all.
  // TODO: We can improve this by tracking if any necessarily beneficial
  // transformations have been performed. This can e.g. be tiling, loop
  // interchange, or ...) We can track this either at the place where the
  // transformation has been performed or, in case of automatic ILP based
  // optimizations, by comparing (yet to be defined) performance metrics
  // before/after the scheduling optimizer
  // (e.g., #stride-one accesses)
  auto NewScheduleMap = NewSchedule.get_map();
  auto OldSchedule = S.getSchedule();
  assert(OldSchedule && "Only IslScheduleOptimizer can insert extension nodes "
                        "that make Scop::getSchedule() return nullptr.");
  bool changed = !OldSchedule.is_equal(NewScheduleMap);
  return changed;
}

namespace {

/// Legacy pass wrapper running the isl schedule optimizer on each SCoP.
class IslScheduleOptimizer : public ScopPass {
public:
  static char ID;

  explicit IslScheduleOptimizer() : ScopPass(ID) {}

  ~IslScheduleOptimizer() override { releaseMemory(); }

  /// Optimize the schedule of the SCoP @p S.
  bool runOnScop(Scop &S) override;

  /// Print the new schedule for the SCoP @p S.
  void printScop(raw_ostream &OS, Scop &S) const override;

  /// Register all analyses and transformation required.
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Release the internal memory.
  void releaseMemory() override {
    isl_schedule_free(LastSchedule);
    LastSchedule = nullptr;
    IslCtx.reset();
  }

private:
  // Keeps the isl context alive as long as LastSchedule references it.
  std::shared_ptr<isl_ctx> IslCtx;
  // The schedule computed by the last run, used by printScop.
  isl_schedule *LastSchedule = nullptr;
};
} // namespace

char IslScheduleOptimizer::ID = 0;

/// Collect statistics for the schedule tree.
///
/// @param Schedule The schedule tree to analyze. If not a schedule tree it is
/// ignored.
/// @param Version The version of the schedule tree that is analyzed.
///                0 for the original schedule tree before any transformation.
///                1 for the schedule tree after isl's rescheduling.
///                2 for the schedule tree after optimizations are applied
///                (tiling, pattern matching)
static void walkScheduleTreeForStatistics(isl::schedule Schedule, int Version) {
  auto Root = Schedule.get_root();
  if (!Root)
    return;

  // Count node kinds per tree version; the statistics counters are indexed
  // by Version.
  isl_schedule_node_foreach_descendant_top_down(
      Root.get(),
      [](__isl_keep isl_schedule_node *nodeptr, void *user) -> isl_bool {
        isl::schedule_node Node = isl::manage_copy(nodeptr);
        int Version = *static_cast<int *>(user);

        switch (isl_schedule_node_get_type(Node.get())) {
        case isl_schedule_node_band: {
          NumBands[Version]++;
          if (isl_schedule_node_band_get_permutable(Node.get()) ==
              isl_bool_true)
            NumPermutable[Version]++;

          int CountMembers = isl_schedule_node_band_n_member(Node.get());
          NumBandMembers[Version] += CountMembers;
          for (int i = 0; i < CountMembers; i += 1) {
            if (Node.band_member_get_coincident(i))
              NumCoincident[Version]++;
          }
          break;
        }

        case isl_schedule_node_filter:
          NumFilters[Version]++;
          break;

        case isl_schedule_node_extension:
          NumExtension[Version]++;
          break;

        default:
          break;
        }

        // Continue the traversal.
        return isl_bool_true;
      },
      &Version);
}

bool IslScheduleOptimizer::runOnScop(Scop &S) {
  // Skip SCoPs in case they're already optimised by PPCGCodeGeneration
  if (S.isToBeSkipped())
    return false;

  // Skip empty SCoPs but still allow code generation as it will delete the
  // loops present but not needed.
  if (S.getSize() == 0) {
    S.markAsOptimized();
    return false;
  }

  const Dependences &D =
      getAnalysis<DependenceInfo>().getDependences(Dependences::AL_Statement);

  if (D.getSharedIslCtx() != S.getSharedIslCtx()) {
    LLVM_DEBUG(dbgs() << "DependenceInfo for another SCoP/isl_ctx\n");
    return false;
  }

  if (!D.hasValidDependences())
    return false;

  // Drop the schedule of any previous run before recomputing.
  isl_schedule_free(LastSchedule);
  LastSchedule = nullptr;

  // Build input data.
  int ValidityKinds =
      Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  int ProximityKinds;

  if (OptimizeDeps == "all")
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  else if (OptimizeDeps == "raw")
    ProximityKinds = Dependences::TYPE_RAW;
  else {
    errs() << "Do not know how to optimize for '" << OptimizeDeps << "'"
           << " Falling back to optimizing all dependences.\n";
    ProximityKinds =
        Dependences::TYPE_RAW | Dependences::TYPE_WAR | Dependences::TYPE_WAW;
  }

  isl::union_set Domain = S.getDomains();

  if (!Domain)
    return false;

  ScopsProcessed++;
  walkScheduleTreeForStatistics(S.getScheduleTree(), 0);

  isl::union_map Validity = D.getDependences(ValidityKinds);
  isl::union_map Proximity = D.getDependences(ProximityKinds);

  // Simplify the dependences by removing the constraints introduced by the
  // domains. This can speed up the scheduling time significantly, as large
  // constant coefficients will be removed from the dependences. The
  // introduction of some additional dependences reduces the possible
  // transformations, but in most cases, such transformation do not seem to be
  // interesting anyway. In some cases this option may stop the scheduler to
  // find any schedule.
  if (SimplifyDeps == "yes") {
    Validity = Validity.gist_domain(Domain);
    Validity = Validity.gist_range(Domain);
    Proximity = Proximity.gist_domain(Domain);
    Proximity = Proximity.gist_range(Domain);
  } else if (SimplifyDeps != "no") {
    errs() << "warning: Option -polly-opt-simplify-deps should either be 'yes' "
              "or 'no'. Falling back to default: 'yes'\n";
  }

  LLVM_DEBUG(dbgs() << "\n\nCompute schedule from: ");
  LLVM_DEBUG(dbgs() << "Domain := " << Domain << ";\n");
  LLVM_DEBUG(dbgs() << "Proximity := " << Proximity << ";\n");
  LLVM_DEBUG(dbgs() << "Validity := " << Validity << ";\n");

  // Translate the string-valued command-line options into the integer flags
  // the isl scheduler expects, warning on unknown values.
  unsigned IslSerializeSCCs;

  if (FusionStrategy == "max") {
    IslSerializeSCCs = 0;
  } else if (FusionStrategy == "min") {
    IslSerializeSCCs = 1;
  } else {
    errs() << "warning: Unknown fusion strategy. Falling back to maximal "
              "fusion.\n";
    IslSerializeSCCs = 0;
  }

  int IslMaximizeBands;

  if (MaximizeBandDepth == "yes") {
    IslMaximizeBands = 1;
  } else if (MaximizeBandDepth == "no") {
    IslMaximizeBands = 0;
  } else {
    errs() << "warning: Option -polly-opt-maximize-bands should either be 'yes'"
              " or 'no'. Falling back to default: 'yes'\n";
    IslMaximizeBands = 1;
  }

  int IslOuterCoincidence;

  if (OuterCoincidence == "yes") {
    IslOuterCoincidence = 1;
  } else if (OuterCoincidence == "no") {
    IslOuterCoincidence = 0;
  } else {
    errs() << "warning: Option -polly-opt-outer-coincidence should either be "
              "'yes' or 'no'. Falling back to default: 'no'\n";
    IslOuterCoincidence = 0;
  }

  isl_ctx *Ctx = S.getIslCtx().get();

  isl_options_set_schedule_outer_coincidence(Ctx, IslOuterCoincidence);
  isl_options_set_schedule_serialize_sccs(Ctx, IslSerializeSCCs);
  isl_options_set_schedule_maximize_band_depth(Ctx, IslMaximizeBands);
  isl_options_set_schedule_max_constant_term(Ctx, MaxConstantTerm);
  isl_options_set_schedule_max_coefficient(Ctx, MaxCoefficient);
  isl_options_set_tile_scale_tile_loops(Ctx, 0);

  // Let the scheduler fail gracefully instead of aborting; restore the
  // previous error behavior afterwards.
  auto OnErrorStatus = isl_options_get_on_error(Ctx);
  isl_options_set_on_error(Ctx, ISL_ON_ERROR_CONTINUE);

  auto SC = isl::schedule_constraints::on_domain(Domain);
  SC = SC.set_proximity(Proximity);
  SC = SC.set_validity(Validity);
  SC = SC.set_coincidence(Validity);
  auto Schedule = SC.compute_schedule();
  isl_options_set_on_error(Ctx, OnErrorStatus);

  walkScheduleTreeForStatistics(Schedule, 1);

  // In cases the scheduler is not able to optimize the code, we just do not
  // touch the schedule.
1606 if (!Schedule) 1607 return false; 1608 1609 ScopsRescheduled++; 1610 1611 LLVM_DEBUG({ 1612 auto *P = isl_printer_to_str(Ctx); 1613 P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK); 1614 P = isl_printer_print_schedule(P, Schedule.get()); 1615 auto *str = isl_printer_get_str(P); 1616 dbgs() << "NewScheduleTree: \n" << str << "\n"; 1617 free(str); 1618 isl_printer_free(P); 1619 }); 1620 1621 Function &F = S.getFunction(); 1622 auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1623 const OptimizerAdditionalInfoTy OAI = {TTI, const_cast<Dependences *>(&D)}; 1624 auto NewSchedule = ScheduleTreeOptimizer::optimizeSchedule(Schedule, &OAI); 1625 NewSchedule = hoistExtensionNodes(NewSchedule); 1626 walkScheduleTreeForStatistics(NewSchedule, 2); 1627 1628 if (!ScheduleTreeOptimizer::isProfitableSchedule(S, NewSchedule)) 1629 return false; 1630 1631 auto ScopStats = S.getStatistics(); 1632 ScopsOptimized++; 1633 NumAffineLoopsOptimized += ScopStats.NumAffineLoops; 1634 NumBoxedLoopsOptimized += ScopStats.NumBoxedLoops; 1635 LastSchedule = NewSchedule.copy(); 1636 IslCtx = S.getSharedIslCtx(); 1637 1638 S.setScheduleTree(NewSchedule); 1639 S.markAsOptimized(); 1640 1641 if (OptimizedScops) 1642 errs() << S; 1643 1644 return false; 1645 } 1646 1647 void IslScheduleOptimizer::printScop(raw_ostream &OS, Scop &) const { 1648 isl_printer *p; 1649 char *ScheduleStr; 1650 1651 OS << "Calculated schedule:\n"; 1652 1653 if (!LastSchedule) { 1654 OS << "n/a\n"; 1655 return; 1656 } 1657 1658 p = isl_printer_to_str(isl_schedule_get_ctx(LastSchedule)); 1659 p = isl_printer_set_yaml_style(p, ISL_YAML_STYLE_BLOCK); 1660 p = isl_printer_print_schedule(p, LastSchedule); 1661 ScheduleStr = isl_printer_get_str(p); 1662 isl_printer_free(p); 1663 1664 OS << ScheduleStr << "\n"; 1665 } 1666 1667 void IslScheduleOptimizer::getAnalysisUsage(AnalysisUsage &AU) const { 1668 ScopPass::getAnalysisUsage(AU); 1669 AU.addRequired<DependenceInfo>(); 1670 
AU.addRequired<TargetTransformInfoWrapperPass>(); 1671 1672 AU.addPreserved<DependenceInfo>(); 1673 } 1674 1675 Pass *polly::createIslScheduleOptimizerPass() { 1676 return new IslScheduleOptimizer(); 1677 } 1678 1679 INITIALIZE_PASS_BEGIN(IslScheduleOptimizer, "polly-opt-isl", 1680 "Polly - Optimize schedule of SCoP", false, false); 1681 INITIALIZE_PASS_DEPENDENCY(DependenceInfo); 1682 INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass); 1683 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass); 1684 INITIALIZE_PASS_END(IslScheduleOptimizer, "polly-opt-isl", 1685 "Polly - Optimize schedule of SCoP", false, false) 1686