//===------ Simplify.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Simplify a SCoP by removing unnecessary statements and accesses.
//
//===----------------------------------------------------------------------===//

#include "polly/Simplify.h"
#include "polly/ScopInfo.h"
#include "polly/ScopPass.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLOStream.h"
#include "polly/Support/ISLTools.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "polly-simplify"

using namespace llvm;
using namespace polly;

namespace {

/// Maximum number of disjuncts we allow in removeOverwrites(). This limit
/// keeps the analysis of accesses in a statement from becoming too complex.
/// Chosen to be relatively small because all the common cases should access
/// only few array elements per statement.
static int const SimplifyMaxDisjuncts = 4;

STATISTIC(ScopsProcessed, "Number of SCoPs processed");
STATISTIC(ScopsModified, "Number of SCoPs simplified");

STATISTIC(TotalOverwritesRemoved, "Number of removed overwritten writes");
STATISTIC(TotalWritesCoalesced, "Number of writes coalesced with another");
STATISTIC(TotalRedundantWritesRemoved,
          "Number of writes of same value removed in any SCoP");
STATISTIC(TotalEmptyPartialAccessesRemoved,
          "Number of empty partial accesses removed");
STATISTIC(TotalDeadAccessesRemoved, "Number of dead accesses removed");
STATISTIC(TotalDeadInstructionsRemoved,
          "Number of unused instructions removed");
STATISTIC(TotalStmtsRemoved, "Number of statements removed in any SCoP");

static bool isImplicitRead(MemoryAccess *MA) {
  return MA->isRead() && MA->isOriginalScalarKind();
}

static bool isExplicitAccess(MemoryAccess *MA) {
  return MA->isOriginalArrayKind();
}

static bool isImplicitWrite(MemoryAccess *MA) {
  return MA->isWrite() && MA->isOriginalScalarKind();
}

/// Like isl::union_map::add_map, but may also return an underapproximated
/// result if it is getting too complex.
///
/// This is implemented by adding disjuncts to the result until the limit is
/// reached.
static isl::union_map underapproximatedAddMap(isl::union_map UMap,
                                              isl::map Map) {
  if (UMap.is_null() || Map.is_null())
    return {};

  isl::map PrevMap = UMap.extract_map(Map.get_space());

  // Fast path: If we know that we cannot exceed the disjunct limit, just add
  // them.
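  // Illustration of the limit (numbers assumed, not from the source): with
  // SimplifyMaxDisjuncts == 4, combining a PrevMap of 2 basic maps with a Map
  // of 1 basic map gives 3 <= 4, so the precise union is returned below; a
  // 3 + 3 combination exceeds the limit and falls through to the
  // underapproximating path.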
  if (isl_map_n_basic_map(PrevMap.get()) + isl_map_n_basic_map(Map.get()) <=
      SimplifyMaxDisjuncts)
    return UMap.add_map(Map);

  isl::map Result = isl::map::empty(PrevMap.get_space());
  PrevMap.foreach_basic_map([&Result](isl::basic_map BMap) -> isl::stat {
    if (isl_map_n_basic_map(Result.get()) > SimplifyMaxDisjuncts)
      return isl::stat::error;
    Result = Result.unite(BMap);
    return isl::stat::ok;
  });
  Map.foreach_basic_map([&Result](isl::basic_map BMap) -> isl::stat {
    if (isl_map_n_basic_map(Result.get()) > SimplifyMaxDisjuncts)
      return isl::stat::error;
    Result = Result.unite(BMap);
    return isl::stat::ok;
  });

  isl::union_map UResult =
      UMap.subtract(isl::map::universe(PrevMap.get_space()));
  // add_map() returns a new union_map; assign it, otherwise the result is
  // silently dropped.
  UResult = UResult.add_map(Result);

  return UResult;
}

class Simplify : public ScopPass {
private:
  /// The last/current SCoP that is/has been processed.
  Scop *S;

  /// Number of writes that are overwritten anyway.
  int OverwritesRemoved = 0;

  /// Number of combined writes.
  int WritesCoalesced = 0;

  /// Number of redundant writes removed from this SCoP.
  int RedundantWritesRemoved = 0;

  /// Number of writes with empty access domain removed.
  int EmptyPartialAccessesRemoved = 0;

  /// Number of unused accesses removed from this SCoP.
  int DeadAccessesRemoved = 0;

  /// Number of unused instructions removed from this SCoP.
  int DeadInstructionsRemoved = 0;

  /// Number of unnecessary statements removed from the SCoP.
  int StmtsRemoved = 0;

  /// Return whether at least one simplification has been applied.
  bool isModified() const {
    return OverwritesRemoved > 0 || WritesCoalesced > 0 ||
           RedundantWritesRemoved > 0 || EmptyPartialAccessesRemoved > 0 ||
           DeadAccessesRemoved > 0 || DeadInstructionsRemoved > 0 ||
           StmtsRemoved > 0;
  }

  /// Remove writes that are overwritten unconditionally later in the same
  /// statement.
  ///
  /// There must be no read of the same value between the write (that is to be
  /// removed) and the overwrite.
  void removeOverwrites() {
    for (auto &Stmt : *S) {
      isl::set Domain = Stmt.getDomain();
      isl::union_map WillBeOverwritten =
          isl::union_map::empty(S->getParamSpace());

      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));

      // Iterate in reverse order, so the overwrite comes before the write that
      // is to be removed.
      for (auto *MA : reverse(Accesses)) {

        // In region statements, the explicit accesses can be in blocks that
        // can be executed in any order. We therefore process only the implicit
        // writes and stop after that.
        if (Stmt.isRegionStmt() && isExplicitAccess(MA))
          break;

        auto AccRel = MA->getAccessRelation();
        AccRel = AccRel.intersect_domain(Domain);
        AccRel = AccRel.intersect_params(S->getContext());

        // If a value is read in-between, do not consider it as overwritten.
        if (MA->isRead()) {
          // Invalidate all overwrites for the array it accesses to avoid too
          // complex isl sets.
          isl::map AccRelUniv = isl::map::universe(AccRel.get_space());
          WillBeOverwritten = WillBeOverwritten.subtract(AccRelUniv);
          continue;
        }

        // If all of a write's elements are overwritten, remove it.
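        // Illustrative case (hypothetical code, not from a test case): in a
        // statement that executes
        //   A[i] = f(i);   // earlier write, visited second (reverse order)
        //   A[i] = g(i);   // later must-write, already in WillBeOverwritten
        // with no read of A[i] in between, every element the first store
        // touches is overwritten anyway, so the subset check below removes it.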
        isl::union_map AccRelUnion = AccRel;
        if (AccRelUnion.is_subset(WillBeOverwritten)) {
          DEBUG(dbgs() << "Removing " << MA
                       << " which will be overwritten anyway\n");

          Stmt.removeSingleMemoryAccess(MA);
          OverwritesRemoved++;
          TotalOverwritesRemoved++;
        }

        // Unconditional writes overwrite other values.
        if (MA->isMustWrite()) {
          // Avoid too complex isl sets. If necessary, throw away some of the
          // knowledge.
          WillBeOverwritten =
              underapproximatedAddMap(WillBeOverwritten, AccRel);
        }
      }
    }
  }

  /// Combine writes that write the same value if possible.
  ///
  /// This function is able to combine:
  /// - Partial writes with disjoint domains.
  /// - Writes that write to the same array element.
  ///
  /// In all cases, both writes must write the same values.
  void coalesceWrites() {
    for (auto &Stmt : *S) {
      isl::set Domain = Stmt.getDomain().intersect_params(S->getContext());

      // We let isl do the lookup for the same-value condition. For this, we
      // wrap llvm::Value into an isl::set such that isl can do the lookup in
      // its hashtable implementation. llvm::Values are only compared within a
      // ScopStmt, so the map can be local to this scope. TODO: Refactor with
      // ZoneAlgorithm::makeValueSet()
      SmallDenseMap<Value *, isl::set> ValueSets;
      auto makeValueSet = [&ValueSets, this](Value *V) -> isl::set {
        assert(V);
        isl::set &Result = ValueSets[V];
        if (Result.is_null()) {
          isl_ctx *Ctx = S->getIslCtx();
          std::string Name =
              getIslCompatibleName("Val", V, ValueSets.size() - 1,
                                   std::string(), UseInstructionNames);
          isl::id Id = give(isl_id_alloc(Ctx, Name.c_str(), V));
          Result = isl::set::universe(
              isl::space(Ctx, 0, 0).set_tuple_id(isl::dim::set, Id));
        }
        return Result;
      };

      // List of all eligible (for coalescing) writes of the future.
      // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
      isl::union_map FutureWrites = isl::union_map::empty(S->getParamSpace());

      // Iterate over accesses from the last to the first.
      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));
      for (MemoryAccess *MA : reverse(Accesses)) {
        // In region statements, the explicit accesses can be in blocks that
        // can be executed in any order. We therefore process only the implicit
        // writes and stop after that.
        if (Stmt.isRegionStmt() && isExplicitAccess(MA))
          break;

        // { Domain[] -> Element[] }
        isl::map AccRel =
            MA->getLatestAccessRelation().intersect_domain(Domain);

        // { [Domain[] -> Element[]] }
        isl::set AccRelWrapped = AccRel.wrap();

        // { Value[] }
        isl::set ValSet;

        if (MA->isMustWrite() && (MA->isOriginalScalarKind() ||
                                  isa<StoreInst>(MA->getAccessInstruction()))) {
          // Normally, tryGetValueStored() should be used to determine which
          // value is written, but it can return nullptr; for PHI accesses,
          // getAccessValue() returns the PHI instead of the PHI's incoming
          // value. In this case, where we only compare values of a single
          // statement, this is fine, because within a statement, a PHI in a
          // successor block always has the same value as the incoming write.
          // We still prefer to use the incoming value directly so we also
          // catch direct uses of that.
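          // Sketch of the fallback (hypothetical IR names, only meant to
          // illustrate the comment above): for a PHI write modeling
          //   %phi = phi double [ %val, %thisblock ], [ ..., %otherblock ]
          // the stored value may not be available, so the code below falls
          // back to getAccessValue(), i.e. %phi itself, which within this
          // statement carries the same value as %val.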
          Value *StoredVal = MA->tryGetValueStored();
          if (!StoredVal)
            StoredVal = MA->getAccessValue();
          ValSet = makeValueSet(StoredVal);

          // { Domain[] }
          isl::set AccDomain = AccRel.domain();

          // Parts of the statement's domain that are not written by this
          // access.
          isl::set UndefDomain = Domain.subtract(AccDomain);

          // { Element[] }
          isl::set ElementUniverse =
              isl::set::universe(AccRel.get_space().range());

          // { Domain[] -> Element[] }
          isl::map UndefAnything =
              isl::map::from_domain_and_range(UndefDomain, ElementUniverse);

          // We are looking for a compatible write access. The other write can
          // access these elements...
          isl::map AllowedAccesses = AccRel.unite(UndefAnything);

          // ... and must write the same value.
          // { [Domain[] -> Element[]] -> Value[] }
          isl::map Filter =
              isl::map::from_domain_and_range(AllowedAccesses.wrap(), ValSet);

          // Look up future writes that fulfill these conditions.
          // { [[Domain[] -> Element[]] -> Value[]] -> MemoryAccess[] }
          isl::union_map Filtered =
              FutureWrites.uncurry().intersect_domain(Filter.wrap());

          // Iterate through the candidates.
          Filtered.foreach_map([&, this](isl::map Map) -> isl::stat {
            MemoryAccess *OtherMA = (MemoryAccess *)Map.get_space()
                                        .get_tuple_id(isl::dim::out)
                                        .get_user();

            isl::map OtherAccRel =
                OtherMA->getLatestAccessRelation().intersect_domain(Domain);

            // The filter only guaranteed that some of OtherMA's accessed
            // elements are allowed. Verify that it only accesses allowed
            // elements. Otherwise, continue with the next candidate.
            if (!OtherAccRel.is_subset(AllowedAccesses).is_true())
              return isl::stat::ok;

            // The combined access relation.
            // { Domain[] -> Element[] }
            isl::map NewAccRel = AccRel.unite(OtherAccRel);
            simplify(NewAccRel);

            // Carry out the coalescing.
            Stmt.removeSingleMemoryAccess(MA);
            OtherMA->setNewAccessRelation(NewAccRel);

            // We removed MA; OtherMA takes its role.
            MA = OtherMA;

            TotalWritesCoalesced++;
            WritesCoalesced++;

            // Don't look for more candidates.
            return isl::stat::error;
          });
        }

        // Two writes cannot be coalesced if there is another access (to some
        // of the written elements) between them. Remove all visited write
        // accesses from the list of eligible writes. Don't just remove the
        // accessed elements, but any MemoryAccess that touches any of the
        // invalidated elements.
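        // Worked example of why this invalidation is needed (hypothetical
        // code, not from a test case):
        //   A[i] = 42;   // earliest write, would be removed by coalescing
        //   x = A[i];
        //   A[i] = 42;   // future write it would be merged into
        // Removing the first store would make the load observe whatever was
        // in A[i] before the statement, so the read (which touches A[i])
        // must evict the later write from FutureWrites below.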
        SmallPtrSet<MemoryAccess *, 2> TouchedAccesses;
        FutureWrites.intersect_domain(AccRelWrapped)
            .foreach_map([&TouchedAccesses](isl::map Map) -> isl::stat {
              MemoryAccess *MA = (MemoryAccess *)Map.get_space()
                                     .range()
                                     .unwrap()
                                     .get_tuple_id(isl::dim::out)
                                     .get_user();
              TouchedAccesses.insert(MA);
              return isl::stat::ok;
            });
        isl::union_map NewFutureWrites =
            isl::union_map::empty(FutureWrites.get_space());
        FutureWrites.foreach_map([&TouchedAccesses, &NewFutureWrites](
                                     isl::map FutureWrite) -> isl::stat {
          MemoryAccess *MA = (MemoryAccess *)FutureWrite.get_space()
                                 .range()
                                 .unwrap()
                                 .get_tuple_id(isl::dim::out)
                                 .get_user();
          if (!TouchedAccesses.count(MA))
            NewFutureWrites = NewFutureWrites.add_map(FutureWrite);
          return isl::stat::ok;
        });
        FutureWrites = NewFutureWrites;

        if (MA->isMustWrite() && !ValSet.is_null()) {
          // { MemoryAccess[] }
          auto AccSet =
              isl::set::universe(isl::space(S->getIslCtx(), 0, 0)
                                     .set_tuple_id(isl::dim::set, MA->getId()));

          // { Val[] -> MemoryAccess[] }
          isl::map ValAccSet = isl::map::from_domain_and_range(ValSet, AccSet);

          // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
          isl::map AccRelValAcc =
              isl::map::from_domain_and_range(AccRelWrapped, ValAccSet.wrap());
          FutureWrites = FutureWrites.add_map(AccRelValAcc);
        }
      }
    }
  }

  /// Remove writes that just write the same value already stored in the
  /// element.
  void removeRedundantWrites() {
    for (auto &Stmt : *S) {
      SmallDenseMap<Value *, isl::set> ValueSets;
      auto makeValueSet = [&ValueSets, this](Value *V) -> isl::set {
        assert(V);
        isl::set &Result = ValueSets[V];
        if (Result.is_null()) {
          isl_ctx *Ctx = S->getIslCtx();
          std::string Name =
              getIslCompatibleName("Val", V, ValueSets.size() - 1,
                                   std::string(), UseInstructionNames);
          isl::id Id = give(isl_id_alloc(Ctx, Name.c_str(), V));
          Result = isl::set::universe(
              isl::space(Ctx, 0, 0).set_tuple_id(isl::dim::set, Id));
        }
        return Result;
      };

      isl::set Domain = Stmt.getDomain();
      Domain = Domain.intersect_params(S->getContext());

      // List of element reads that still have the same value while iterating
      // through the MemoryAccesses.
      // { [Domain[] -> Element[]] -> Val[] }
      isl::union_map Known = isl::union_map::empty(S->getParamSpace());

      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));
      for (MemoryAccess *MA : Accesses) {
        // Is the memory access in a defined order relative to the other
        // accesses? In region statements, only the first and the last accesses
        // have a defined order. Execution of those in the middle may depend on
        // runtime conditions and therefore cannot be modified.
        bool IsOrdered =
            Stmt.isBlockStmt() || MA->isOriginalScalarKind() ||
            (!S->getBoxedLoops().size() && MA->getAccessInstruction() &&
             Stmt.getEntryBlock() == MA->getAccessInstruction()->getParent());

        isl::map AccRel = MA->getAccessRelation();
        AccRel = AccRel.intersect_domain(Domain);
        isl::set AccRelWrapped = AccRel.wrap();

        // Determine whether a write is redundant (stores only values that are
        // already present in the written array elements) and remove it if this
        // is the case.
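        // Illustration (hypothetical code, not from a test case): in
        //   double v = A[i];   // Known records { [Domain -> A[i]] -> v }
        //   A[i] = v;          // stores what A[i] already contains
        // the store writes only values already present in the written
        // elements, so the subset check below removes it.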
        if (IsOrdered && MA->isMustWrite() &&
            (isa<StoreInst>(MA->getAccessInstruction()) ||
             MA->isOriginalScalarKind())) {
          Value *StoredVal = MA->tryGetValueStored();
          if (!StoredVal)
            StoredVal = MA->getAccessValue();

          if (StoredVal) {
            // Lookup in the set of known values.
            isl::map AccRelStoredVal = isl::map::from_domain_and_range(
                AccRelWrapped, makeValueSet(StoredVal));
            if (isl::union_map(AccRelStoredVal).is_subset(Known)) {
              DEBUG(dbgs() << "Cleanup of " << MA << ":\n");
              DEBUG(dbgs() << " Scalar: " << *StoredVal << "\n");
              DEBUG(dbgs() << " AccRel: " << AccRel << "\n");

              Stmt.removeSingleMemoryAccess(MA);

              RedundantWritesRemoved++;
              TotalRedundantWritesRemoved++;
            }
          }
        }

        // Update the set of known values.
        if (MA->isRead()) {
          // Loaded values are the currently known values of the array element
          // it was loaded from.
          Value *LoadedVal = MA->getAccessValue();
          if (LoadedVal && IsOrdered) {
            isl::map AccRelVal = isl::map::from_domain_and_range(
                AccRelWrapped, makeValueSet(LoadedVal));

            Known = Known.add_map(AccRelVal);
          }
        } else if (MA->isWrite()) {
          // Remove (possibly) overwritten values from the known elements set.
          // We remove all elements of the accessed array to avoid too complex
          // isl sets.
          isl::set AccRelUniv = isl::set::universe(AccRelWrapped.get_space());
          Known = Known.subtract_domain(AccRelUniv);

          // At this point, we could add the written value of must-writes.
          // However, writing the same values is already handled by
          // coalesceWrites().
        }
      }
    }
  }

  /// Remove statements without side effects.
  void removeUnnecessaryStmts() {
    auto NumStmtsBefore = S->getSize();
    S->simplifySCoP(true);
    assert(NumStmtsBefore >= S->getSize());
    StmtsRemoved = NumStmtsBefore - S->getSize();
    DEBUG(dbgs() << "Removed " << StmtsRemoved << " (of " << NumStmtsBefore
                 << ") statements\n");
    TotalStmtsRemoved += StmtsRemoved;
  }

  /// Remove accesses that have an empty domain.
  void removeEmptyPartialAccesses() {
    for (ScopStmt &Stmt : *S) {
      // Defer the actual removal to not invalidate iterators.
      SmallVector<MemoryAccess *, 8> DeferredRemove;

      for (MemoryAccess *MA : Stmt) {
        if (!MA->isWrite())
          continue;

        isl::map AccRel = MA->getAccessRelation();
        if (!AccRel.is_empty().is_true())
          continue;

        DEBUG(dbgs() << "Removing " << MA
                     << " because it's a partial access that never occurs\n");
        DeferredRemove.push_back(MA);
      }

      for (MemoryAccess *MA : DeferredRemove) {
        Stmt.removeSingleMemoryAccess(MA);
        EmptyPartialAccessesRemoved++;
        TotalEmptyPartialAccessesRemoved++;
      }
    }
  }

  /// Mark all reachable instructions and accesses, and sweep those that are
  /// not reachable.
  void markAndSweep(LoopInfo *LI) {
    DenseSet<MemoryAccess *> UsedMA;
    DenseSet<VirtualInstruction> UsedInsts;

    // Get all reachable instructions and accesses.
    markReachable(S, LI, UsedInsts, UsedMA);

    // Remove all non-reachable accesses.
    // We need to get all MemoryAccesses first, in order not to invalidate the
    // iterators when removing them.
    SmallVector<MemoryAccess *, 64> AllMAs;
    for (ScopStmt &Stmt : *S)
      AllMAs.append(Stmt.begin(), Stmt.end());

    for (MemoryAccess *MA : AllMAs) {
      if (UsedMA.count(MA))
        continue;
      DEBUG(dbgs() << "Removing " << MA << " because its value is not used\n");
      ScopStmt *Stmt = MA->getStatement();
      Stmt->removeSingleMemoryAccess(MA);

      DeadAccessesRemoved++;
      TotalDeadAccessesRemoved++;
    }

    // Remove all non-reachable instructions.
    for (ScopStmt &Stmt : *S) {
      if (!Stmt.isBlockStmt())
        continue;

      SmallVector<Instruction *, 32> AllInsts(Stmt.insts_begin(),
                                              Stmt.insts_end());
      SmallVector<Instruction *, 32> RemainInsts;

      for (Instruction *Inst : AllInsts) {
        auto It = UsedInsts.find({&Stmt, Inst});
        if (It == UsedInsts.end()) {
          DEBUG(dbgs() << "Removing "; Inst->print(dbgs());
                dbgs() << " because it is not used\n");
          DeadInstructionsRemoved++;
          TotalDeadInstructionsRemoved++;
          continue;
        }

        RemainInsts.push_back(Inst);

        // If instructions appear multiple times, keep only the first.
        UsedInsts.erase(It);
      }

      // Set the new instruction list to be only those we did not remove.
      Stmt.setInstructions(RemainInsts);
    }
  }

  /// Print simplification statistics to @p OS.
  void printStatistics(llvm::raw_ostream &OS, int Indent = 0) const {
    OS.indent(Indent) << "Statistics {\n";
    OS.indent(Indent + 4) << "Overwrites removed: " << OverwritesRemoved
                          << '\n';
    OS.indent(Indent + 4) << "Partial writes coalesced: " << WritesCoalesced
                          << "\n";
    OS.indent(Indent + 4) << "Redundant writes removed: "
                          << RedundantWritesRemoved << "\n";
    OS.indent(Indent + 4) << "Accesses with empty domains removed: "
                          << EmptyPartialAccessesRemoved << "\n";
    OS.indent(Indent + 4) << "Dead accesses removed: " << DeadAccessesRemoved
                          << '\n';
    OS.indent(Indent + 4) << "Dead instructions removed: "
                          << DeadInstructionsRemoved << '\n';
    OS.indent(Indent + 4) << "Stmts removed: " << StmtsRemoved << "\n";
    OS.indent(Indent) << "}\n";
  }

  /// Print the current state of all MemoryAccesses to @p OS.
  void printAccesses(llvm::raw_ostream &OS, int Indent = 0) const {
    OS.indent(Indent) << "After accesses {\n";
    for (auto &Stmt : *S) {
      OS.indent(Indent + 4) << Stmt.getBaseName() << "\n";
      for (auto *MA : Stmt)
        MA->print(OS);
    }
    OS.indent(Indent) << "}\n";
  }

public:
  static char ID;
  explicit Simplify() : ScopPass(ID) {}

  virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredTransitive<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  virtual bool runOnScop(Scop &S) override {
    // Reset statistics of last processed SCoP.
    releaseMemory();
    assert(!isModified());

    // Prepare processing of this SCoP.
    this->S = &S;
    ScopsProcessed++;

    DEBUG(dbgs() << "Removing partial writes that never happen...\n");
    removeEmptyPartialAccesses();

    DEBUG(dbgs() << "Removing overwrites...\n");
    removeOverwrites();

    DEBUG(dbgs() << "Coalesce partial writes...\n");
    coalesceWrites();

    DEBUG(dbgs() << "Removing redundant writes...\n");
    removeRedundantWrites();

    DEBUG(dbgs() << "Cleanup unused accesses...\n");
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    markAndSweep(LI);

    DEBUG(dbgs() << "Removing statements without side effects...\n");
    removeUnnecessaryStmts();

    if (isModified())
      ScopsModified++;
    DEBUG(dbgs() << "\nFinal Scop:\n");
    DEBUG(dbgs() << S);

    return false;
  }

  virtual void printScop(raw_ostream &OS, Scop &S) const override {
    assert(&S == this->S &&
           "Can only print analysis for the last processed SCoP");
    printStatistics(OS);

    if (!isModified()) {
      OS << "SCoP could not be simplified\n";
      return;
    }
    printAccesses(OS);
  }

  virtual void releaseMemory() override {
    S = nullptr;

    OverwritesRemoved = 0;
    WritesCoalesced = 0;
    RedundantWritesRemoved = 0;
    EmptyPartialAccessesRemoved = 0;
    DeadAccessesRemoved = 0;
    DeadInstructionsRemoved = 0;
    StmtsRemoved = 0;
  }
};

char Simplify::ID;
} // anonymous namespace

namespace polly {
SmallVector<MemoryAccess *, 32> getAccessesInOrder(ScopStmt &Stmt) {

  SmallVector<MemoryAccess *, 32> Accesses;

  for (MemoryAccess *MemAcc : Stmt)
    if (isImplicitRead(MemAcc))
      Accesses.push_back(MemAcc);

  for (MemoryAccess *MemAcc : Stmt)
    if (isExplicitAccess(MemAcc))
      Accesses.push_back(MemAcc);

  for (MemoryAccess *MemAcc : Stmt)
    if (isImplicitWrite(MemAcc))
      Accesses.push_back(MemAcc);

  return Accesses;
}
} // namespace polly

Pass *polly::createSimplifyPass() { return new Simplify(); }

INITIALIZE_PASS_BEGIN(Simplify, "polly-simplify", "Polly - Simplify", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(Simplify, "polly-simplify", "Polly - Simplify", false,
                    false)