//===------ ZoneAlgo.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Derive information about array elements between statements ("Zones").
//
// The algorithms here work on the scatter space - the image space of the
// schedule returned by Scop::getSchedule(). We call an element in that space a
// "timepoint". Timepoints are lexicographically ordered such that we can
// define ranges in the scatter space. We use two flavors of such ranges:
// Timepoint sets and zones. A timepoint set is simply a subset of the scatter
// space and is directly stored as isl_set.
//
// Zones are used to describe the space between timepoints as open sets, i.e.
// they do not contain the extrema. Using isl rational sets to express these
// would be overkill. We also cannot store them as the integer timepoints they
// contain; the (nonempty) zone between 1 and 2 would be empty and
// indistinguishable from e.g. the zone between 3 and 4. Also, we cannot store
// the integer set including the extrema; the set ]1,2[ + ]3,4[ could be
// coalesced to ]1,3[, although we defined the range [2,3] to be not in the set.
// Instead, we store the "half-open" integer extrema, including the lower bound,
// but excluding the upper bound. Examples:
//
// * The set { [i] : 1 <= i <= 3 } represents the zone ]0,3[ (which contains the
//   integer points 1 and 2, but not 0 or 3)
//
// * { [1] } represents the zone ]0,1[
//
// * { [i] : i = 1 or i = 3 } represents the zone ]0,1[ + ]2,3[
//
// Therefore, an integer i in the set represents the zone ]i-1,i[, i.e.
// strictly speaking the integer points never belong to the zone. However,
// depending on the interpretation, one might want to include them. Part of the
// interpretation may not be known when the zone is constructed.
//
// Reads are assumed to always take place before writes, hence we can think of
// reads taking place at the beginning of a timepoint and writes at the end.
//
// Let's assume that the zone represents the lifetime of a variable. That is,
// the zone begins with a write that defines the value during its lifetime and
// ends with the last read of that value. In the following we consider whether a
// read/write at the beginning/ending of the lifetime zone should be within the
// zone or outside of it.
//
// * A read at the timepoint that starts the live-range loads the previous
//   value. Hence, exclude the timepoint starting the zone.
//
// * For a write at the timepoint that starts the live-range, it is not defined
//   whether it occurs before or after the write that starts the lifetime. We do
//   not allow this situation to occur. Hence, we include the timepoint starting
//   the zone to determine whether they are conflicting.
//
// * A read at the timepoint that ends the live-range reads the same variable.
//   We include the timepoint at the end of the zone to include that read into
//   the live-range. Doing otherwise would mean that the two reads access
//   different values, which would mean that the values they read are both alive
//   at the same time but occupy the same variable.
//
// * A write at the timepoint that ends the live-range starts a new live-range.
//   It must not be included in the live-range of the previous definition.
//
// All combinations of reads and writes at the endpoints are possible, but most
// of the time only the write->read (for instance, a live-range from definition
// to last use) and read->write (for instance, an unused range from last use to
// overwrite) combinations are interesting (half-open ranges). write->write
// zones might be useful as well in some context to represent
// output-dependencies.
//
// @see convertZoneToTimepoints
//
//
// The code makes use of maps and sets in many different spaces. To not lose
// track of which space a set or map is expected to be in, variables holding an
// isl reference are usually annotated in the comments. They roughly follow isl
// syntax for spaces, but only the tuples, not the dimensions. The tuples have a
// meaning as follows:
//
// * Space[] - An unspecified tuple. Used for function parameters such that the
//   function caller can use it for anything they like.
//
// * Domain[] - A statement instance as returned by ScopStmt::getDomain()
//   isl_id_get_name: Stmt_<NameOfBasicBlock>
//   isl_id_get_user: Pointer to ScopStmt
//
// * Element[] - An array element as in the range part of
//   MemoryAccess::getAccessRelation()
//   isl_id_get_name: MemRef_<NameOfArrayVariable>
//   isl_id_get_user: Pointer to ScopArrayInfo
//
// * Scatter[] - Scatter space or space of timepoints
//   Has no tuple id
//
// * Zone[] - Range between timepoints as described above
//   Has no tuple id
//
// * ValInst[] - An llvm::Value as defined at a specific timepoint.
//
//   A ValInst[] itself can be structured as one of:
//
//   * [] - An unknown value.
//     Always zero dimensions
//     Has no tuple id
//
//   * Value[] - An llvm::Value that is read-only in the SCoP, i.e. its
//     runtime content does not depend on the timepoint.
//     Always zero dimensions
//     isl_id_get_name: Val_<NameOfValue>
//     isl_id_get_user: A pointer to an llvm::Value
//
//   * SCEV[...] - A synthesizable llvm::SCEV Expression.
//     In contrast to a Value[] it has at least one dimension per
//     SCEVAddRecExpr in the SCEV.
//
//   * [Domain[] -> Value[]] - An llvm::Value that may change during the
//     Scop's execution.
//     The tuple itself has no id, but it wraps a map space holding a
//     statement instance which defines the llvm::Value as the map's domain
//     and llvm::Value itself as range.
//
// @see makeValInst()
//
// An annotation "{ Domain[] -> Scatter[] }" therefore means: A map from a
// statement instance to a timepoint, aka a schedule. There is only one scatter
// space, but most of the time multiple statements are processed in one set.
// This is why most of the time isl_union_map has to be used.
//
// The basic algorithm works as follows:
// At first we verify that the SCoP is compatible with this technique. For
// instance, two writes cannot write to the same location at the same statement
// instance because we cannot determine within the polyhedral model which one
// comes first. Once this was verified, we compute zones at which an array
// element is unused. This computation can fail if it takes too long. Then the
// main algorithm is executed. Because every store potentially trails an unused
// zone, we start at stores. We search for a scalar (MemoryKind::Value or
// MemoryKind::PHI) that we can map to the array element overwritten by the
// store, preferably one that is used by the store or at least the ScopStmt.
// When it does not conflict with the lifetime of the values in the array
// element, the map is applied and the unused zone updated as it is now used. We
// continue to try to map scalars to the array element until there are no more
// candidates to map.
// The algorithm is greedy in the sense that the first scalar
// not conflicting will be mapped. Other scalars processed later that could have
// fit the same unused zone will be rejected. As such the result depends on the
// processing order.
//
//===----------------------------------------------------------------------===//

#include "polly/ZoneAlgo.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLTools.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "polly-zone"

STATISTIC(NumIncompatibleArrays, "Number of not zone-analyzable arrays");
STATISTIC(NumCompatibleArrays, "Number of zone-analyzable arrays");
STATISTIC(NumRecursivePHIs, "Number of recursive PHIs");
STATISTIC(NumNormalizablePHIs, "Number of normalizable PHIs");
STATISTIC(NumPHINormialization, "Number of PHI executed normalizations");

using namespace polly;
using namespace llvm;

/// Compute the definitions reaching each timepoint; thin wrapper around
/// computeReachingWrite with the "reverse" mode disabled.
///
/// @param Schedule  { DomainWrite[] -> Scatter[] }
/// @param Writes    { DomainWrite[] -> Element[] }
/// @param InclDef   Include the timepoint of the definition into the result.
/// @param InclRedef Include the timepoint of the overwrite into the result.
static isl::union_map computeReachingDefinition(isl::union_map Schedule,
                                                isl::union_map Writes,
                                                bool InclDef, bool InclRedef) {
  return computeReachingWrite(Schedule, Writes, false, InclDef, InclRedef);
}

/// Compute the reaching definition of a scalar.
///
/// Compared to computeReachingDefinition, there is just one element which is
/// accessed and therefore only a set of instances that access that element is
/// required.
///
/// @param Schedule  { DomainWrite[] -> Scatter[] }
/// @param Writes    { DomainWrite[] }
/// @param InclDef   Include the timepoint of the definition to the result.
/// @param InclRedef Include the timepoint of the overwrite into the result.
///
/// @return { Scatter[] -> DomainWrite[] }
static isl::union_map computeScalarReachingDefinition(isl::union_map Schedule,
                                                      isl::union_set Writes,
                                                      bool InclDef,
                                                      bool InclRedef) {
  // Make the写 instances the domain of a map with a zero-dimensional range so
  // that the generic element-wise algorithm can be reused.
  // { DomainWrite[] -> Element[] }
  isl::union_map Defs = isl::union_map::from_domain(Writes);

  // { [Element[] -> Scatter[]] -> DomainWrite[] }
  auto ReachDefs =
      computeReachingDefinition(Schedule, Defs, InclDef, InclRedef);

  // Strip the (trivial) element dimension off again.
  // { Scatter[] -> DomainWrite[] }
  return ReachDefs.curry().range().unwrap();
}

/// Compute the reaching definition of a scalar.
///
/// This overload accepts only a single writing statement as an isl_map,
/// consequently the result also is only a single isl_map.
///
/// @param Schedule  { DomainWrite[] -> Scatter[] }
/// @param Writes    { DomainWrite[] }
/// @param InclDef   Include the timepoint of the definition to the result.
/// @param InclRedef Include the timepoint of the overwrite into the result.
///
/// @return { Scatter[] -> DomainWrite[] }
static isl::map computeScalarReachingDefinition(isl::union_map Schedule,
                                                isl::set Writes, bool InclDef,
                                                bool InclRedef) {
  isl::space DomainSpace = Writes.get_space();
  isl::space ScatterSpace = getScatterSpace(Schedule);

  // { Scatter[] -> DomainWrite[] }
  isl::union_map UMap = computeScalarReachingDefinition(
      Schedule, isl::union_set(Writes), InclDef, InclRedef);

  // The union result contains at most one map; extract it as a plain isl::map.
  isl::space ResultSpace = ScatterSpace.map_from_domain_and_range(DomainSpace);
  return singleton(UMap, ResultSpace);
}

isl::union_map polly::makeUnknownForDomain(isl::union_set Domain) {
  // Map every domain instance to the zero-dimensional "unknown" ValInst.
  return isl::union_map::from_domain(Domain);
}

/// Create a domain-to-unknown value mapping.
///
/// @see makeUnknownForDomain(isl::union_set)
///
/// @param Domain { Domain[] }
///
/// @return { Domain[] -> ValInst[] }
static isl::map makeUnknownForDomain(isl::set Domain) {
  return isl::map::from_domain(Domain);
}

/// Return whether @p Map maps to an unknown value.
///
/// An unknown ValInst[] is a zero-dimensional, unnamed, non-wrapping tuple.
///
/// @param { [] -> ValInst[] }
static bool isMapToUnknown(const isl::map &Map) {
  isl::space Space = Map.get_space().range();
  return Space.has_tuple_id(isl::dim::set).is_false() &&
         Space.is_wrapping().is_false() && Space.dim(isl::dim::set) == 0;
}

isl::union_map polly::filterKnownValInst(const isl::union_map &UMap) {
  // Keep only those maps whose range is a known (non-unknown) ValInst[].
  isl::union_map Result = isl::union_map::empty(UMap.get_space());
  for (isl::map Map : UMap.get_map_list()) {
    if (!isMapToUnknown(Map))
      Result = Result.add_map(Map);
  }
  return Result;
}

ZoneAlgorithm::ZoneAlgorithm(const char *PassName, Scop *S, LoopInfo *LI)
    : PassName(PassName), IslCtx(S->getSharedIslCtx()), S(S), LI(LI),
      Schedule(S->getSchedule()) {
  auto Domains = S->getDomains();

  // Restrict the schedule to the instances that actually execute, and derive
  // the parameter and scatter spaces used by all later computations.
  Schedule = Schedule.intersect_domain(Domains);
  ParamSpace = Schedule.get_space();
  ScatterSpace = getScatterSpace(Schedule);
}

/// Check if all stores in @p Stmt store the very same value.
///
/// This covers a special situation occurring in Polybench's
/// covariance/correlation (which is typical for algorithms that cover symmetric
/// matrices):
///
///   for (int i = 0; i < n; i += 1)
///     for (int j = 0; j <= i; j += 1) {
///       double x = ...;
///       C[i][j] = x;
///       C[j][i] = x;
///     }
///
/// For i == j, the same value is written twice to the same element. Double
/// writes to the same element are not allowed in DeLICM because its algorithm
/// does not see which of the writes is effective. But if it's the same value
/// anyway, it doesn't matter.
///
/// LLVM passes, however, cannot simplify this because the write is necessary
/// for i != j (unless it would add a condition for one of the writes to occur
/// only if i != j).
///
/// TODO: In the future we may want to extend this to make the checks
///       specific to different memory locations.
static bool onlySameValueWrites(ScopStmt *Stmt) {
  Value *V = nullptr;

  for (auto *MA : *Stmt) {
    // Only consider definite (must) writes to arrays.
    if (!MA->isLatestArrayKind() || !MA->isMustWrite() ||
        !MA->isOriginalArrayKind())
      continue;

    // Remember the first written value...
    if (!V) {
      V = MA->getAccessValue();
      continue;
    }

    // ...and compare every subsequent one against it.
    if (V != MA->getAccessValue())
      return false;
  }
  return true;
}

/// Is @p InnerLoop nested inside @p OuterLoop?
static bool isInsideLoop(Loop *OuterLoop, Loop *InnerLoop) {
  // If OuterLoop is nullptr, we cannot call its contains() method. In this case
  // OuterLoop represents the 'top level' and therefore contains all loops.
  return !OuterLoop || OuterLoop->contains(InnerLoop);
}

// Find array elements in @p Stmt whose access pattern makes them unusable for
// zone analysis (load-after-store, writes in non-affine subregions,
// store-after-store) and add the whole containing arrays to
// @p IncompatibleElts. All touched arrays are added to @p AllElts.
void ZoneAlgorithm::collectIncompatibleElts(ScopStmt *Stmt,
                                            isl::union_set &IncompatibleElts,
                                            isl::union_set &AllElts) {
  auto Stores = makeEmptyUnionMap();
  auto Loads = makeEmptyUnionMap();

  // This assumes that the MemoryKind::Array MemoryAccesses are iterated in
  // order.
  for (auto *MA : *Stmt) {
    if (!MA->isOriginalArrayKind())
      continue;

    isl::map AccRelMap = getAccessRelationFor(MA);
    isl::union_map AccRel = AccRelMap;

    // To avoid solving any ILP problems, always add entire arrays instead of
    // just the elements that are accessed.
    auto ArrayElts = isl::set::universe(AccRelMap.get_space().range());
    AllElts = AllElts.add_set(ArrayElts);

    if (MA->isRead()) {
      // Reject load after store to same location.
      if (!Stores.is_disjoint(AccRel)) {
        LLVM_DEBUG(
            dbgs() << "Load after store of same element in same statement\n");
        OptimizationRemarkMissed R(PassName, "LoadAfterStore",
                                   MA->getAccessInstruction());
        R << "load after store of same element in same statement";
        R << " (previous stores: " << Stores;
        R << ", loading: " << AccRel << ")";
        S->getFunction().getContext().diagnose(R);

        IncompatibleElts = IncompatibleElts.add_set(ArrayElts);
      }

      Loads = Loads.unite(AccRel);

      continue;
    }

    // In region statements the order is less clear, e.g. the load and store
    // might be in a boxed loop.
    if (Stmt->isRegionStmt() && !Loads.is_disjoint(AccRel)) {
      LLVM_DEBUG(dbgs() << "WRITE in non-affine subregion not supported\n");
      OptimizationRemarkMissed R(PassName, "StoreInSubregion",
                                 MA->getAccessInstruction());
      R << "store is in a non-affine subregion";
      S->getFunction().getContext().diagnose(R);

      IncompatibleElts = IncompatibleElts.add_set(ArrayElts);
    }

    // Do not allow more than one store to the same location.
    if (!Stores.is_disjoint(AccRel) && !onlySameValueWrites(Stmt)) {
      LLVM_DEBUG(dbgs() << "WRITE after WRITE to same element\n");
      OptimizationRemarkMissed R(PassName, "StoreAfterStore",
                                 MA->getAccessInstruction());
      R << "store after store of same element in same statement";
      R << " (previous stores: " << Stores;
      R << ", storing: " << AccRel << ")";
      S->getFunction().getContext().diagnose(R);

      IncompatibleElts = IncompatibleElts.add_set(ArrayElts);
    }

    Stores = Stores.unite(AccRel);
  }
}

// Record an array read access in AllReads and, if it is a LoadInst, the value
// it reads per element in AllReadValInst.
void ZoneAlgorithm::addArrayReadAccess(MemoryAccess *MA) {
  assert(MA->isLatestArrayKind());
  assert(MA->isRead());
  ScopStmt *Stmt = MA->getStatement();

  // { DomainRead[] -> Element[] }
  auto AccRel = intersectRange(getAccessRelationFor(MA), CompatibleElts);
  AllReads = AllReads.add_map(AccRel);

  if (LoadInst *Load = dyn_cast_or_null<LoadInst>(MA->getAccessInstruction())) {
    // { DomainRead[] -> ValInst[] }
    isl::map LoadValInst = makeValInst(
        Load, Stmt, LI->getLoopFor(Load->getParent()), Stmt->isBlockStmt());

    // { DomainRead[] -> [Element[] -> DomainRead[]] }
    isl::map IncludeElement = AccRel.domain_map().curry();

    // { [Element[] -> DomainRead[]] -> ValInst[] }
    isl::map EltLoadValInst = LoadValInst.apply_domain(IncludeElement);

    AllReadValInst = AllReadValInst.add_map(EltLoadValInst);
  }
}

// Determine the ValInst written by @p MA, or a null union_map if it cannot be
// determined (may-writes, fat pointers, non-single-valued access relations,
// non-zero memsets).
isl::union_map ZoneAlgorithm::getWrittenValue(MemoryAccess *MA,
                                              isl::map AccRel) {
  if (!MA->isMustWrite())
    return {};

  Value *AccVal = MA->getAccessValue();
  ScopStmt *Stmt = MA->getStatement();
  Instruction *AccInst = MA->getAccessInstruction();

  // Write a value to a single element.
  auto L = MA->isOriginalArrayKind() ? LI->getLoopFor(AccInst->getParent())
                                     : Stmt->getSurroundingLoop();
  if (AccVal &&
      AccVal->getType() == MA->getLatestScopArrayInfo()->getElementType() &&
      AccRel.is_single_valued().is_true())
    return makeNormalizedValInst(AccVal, Stmt, L);

  // memset(_, '0', ) is equivalent to writing the null value to all touched
  // elements. isMustWrite() ensures that all of an element's bytes are
  // overwritten.
  if (auto *Memset = dyn_cast<MemSetInst>(AccInst)) {
    auto *WrittenConstant = dyn_cast<Constant>(Memset->getValue());
    Type *Ty = MA->getLatestScopArrayInfo()->getElementType();
    if (WrittenConstant && WrittenConstant->isZeroValue()) {
      Constant *Zero = Constant::getNullValue(Ty);
      return makeNormalizedValInst(Zero, Stmt, L);
    }
  }

  return {};
}

// Record an array write access in AllMustWrites/AllMayWrites and the value it
// writes per element in AllWriteValInst (the unknown ValInst if the written
// value cannot be determined).
void ZoneAlgorithm::addArrayWriteAccess(MemoryAccess *MA) {
  assert(MA->isLatestArrayKind());
  assert(MA->isWrite());
  auto *Stmt = MA->getStatement();

  // { Domain[] -> Element[] }
  isl::map AccRel = intersectRange(getAccessRelationFor(MA), CompatibleElts);

  if (MA->isMustWrite())
    AllMustWrites = AllMustWrites.add_map(AccRel);

  if (MA->isMayWrite())
    AllMayWrites = AllMayWrites.add_map(AccRel);

  // { Domain[] -> ValInst[] }
  isl::union_map WriteValInstance = getWrittenValue(MA, AccRel);
  if (!WriteValInstance)
    WriteValInstance = makeUnknownForDomain(Stmt);

  // { Domain[] -> [Element[] -> Domain[]] }
  isl::map IncludeElement = AccRel.domain_map().curry();

  // { [Element[] -> DomainWrite[]] -> ValInst[] }
  isl::union_map EltWriteValInst =
      WriteValInstance.apply_domain(IncludeElement);

  AllWriteValInst = AllWriteValInst.unite(EltWriteValInst);
}

/// For an llvm::Value defined in @p DefStmt, compute the RAW dependency for a
/// use in every instance of @p UseStmt.
///
/// @param UseStmt Statement a scalar is used in.
/// @param DefStmt Statement a scalar is defined in.
///
/// @return { DomainUse[] -> DomainDef[] }
isl::map ZoneAlgorithm::computeUseToDefFlowDependency(ScopStmt *UseStmt,
                                                      ScopStmt *DefStmt) {
  // { DomainUse[] -> Scatter[] }
  isl::map UseScatter = getScatterFor(UseStmt);

  // { Zone[] -> DomainDef[] }
  isl::map ReachDefZone = getScalarReachingDefinition(DefStmt);

  // { Scatter[] -> DomainDef[] }
  isl::map ReachDefTimepoints =
      convertZoneToTimepoints(ReachDefZone, isl::dim::in, false, true);

  // { DomainUse[] -> DomainDef[] }
  return UseScatter.apply_range(ReachDefTimepoints);
}

/// Return whether @p PHI refers (also transitively through other PHIs) to
/// itself.
///
/// loop:
///   %phi1 = phi [0, %preheader], [%phi1, %loop]
///   br i1 %c, label %loop, label %exit
///
/// exit:
///   %phi2 = phi [%phi1, %bb]
///
/// In this example, %phi1 is recursive, but %phi2 is not.
static bool isRecursivePHI(const PHINode *PHI) {
  // Worklist-based transitive traversal of incoming PHI values, with a Visited
  // set to terminate on cycles not involving @p PHI itself.
  SmallVector<const PHINode *, 8> Worklist;
  SmallPtrSet<const PHINode *, 8> Visited;
  Worklist.push_back(PHI);

  while (!Worklist.empty()) {
    const PHINode *Cur = Worklist.pop_back_val();

    if (Visited.count(Cur))
      continue;
    Visited.insert(Cur);

    for (const Use &Incoming : Cur->incoming_values()) {
      Value *IncomingVal = Incoming.get();
      auto *IncomingPHI = dyn_cast<PHINode>(IncomingVal);
      if (!IncomingPHI)
        continue;

      if (IncomingPHI == PHI)
        return true;
      Worklist.push_back(IncomingPHI);
    }
  }
  return false;
}

// For a PHI's ScopArrayInfo, compute which incoming write instance provides
// the value read by each PHI read instance (the lexicographically last write
// before the read). The result is cached in PerPHIMaps.
isl::union_map ZoneAlgorithm::computePerPHI(const ScopArrayInfo *SAI) {
  // TODO: If the PHI has an incoming block from before the SCoP, it is not
  // represented in any ScopStmt.

  auto *PHI = cast<PHINode>(SAI->getBasePtr());
  auto It = PerPHIMaps.find(PHI);
  if (It != PerPHIMaps.end())
    return It->second;

  // Cannot reliably compute immediate predecessor for undefined executions, so
  // bail out if we do not know. This in particular applies to undefined control
  // flow.
  isl::set DefinedContext = S->getDefinedBehaviorContext();
  if (!DefinedContext)
    return nullptr;

  assert(SAI->isPHIKind());

  // { DomainPHIWrite[] -> Scatter[] }
  isl::union_map PHIWriteScatter = makeEmptyUnionMap();

  // Collect all incoming block timepoints.
  for (MemoryAccess *MA : S->getPHIIncomings(SAI)) {
    isl::map Scatter = getScatterFor(MA);
    PHIWriteScatter = PHIWriteScatter.add_map(Scatter);
  }

  // { DomainPHIRead[] -> Scatter[] }
  isl::map PHIReadScatter = getScatterFor(S->getPHIRead(SAI));

  // { DomainPHIRead[] -> Scatter[] }
  isl::map BeforeRead = beforeScatter(PHIReadScatter, true);

  // { Scatter[] }
  isl::set WriteTimes = singleton(PHIWriteScatter.range(), ScatterSpace);

  // { DomainPHIRead[] -> Scatter[] }
  isl::map PHIWriteTimes = BeforeRead.intersect_range(WriteTimes);

  // Remove instances outside the context.
  PHIWriteTimes = PHIWriteTimes.intersect_params(DefinedContext);

  // The last incoming write before the read provides the PHI's value.
  isl::map LastPerPHIWrites = PHIWriteTimes.lexmax();

  // { DomainPHIRead[] -> DomainPHIWrite[] }
  isl::union_map Result =
      isl::union_map(LastPerPHIWrites).apply_range(PHIWriteScatter.reverse());
  assert(!Result.is_single_valued().is_false());
  assert(!Result.is_injective().is_false());

  PerPHIMaps.insert({PHI, Result});
  return Result;
}

isl::union_set ZoneAlgorithm::makeEmptyUnionSet() const {
  return isl::union_set::empty(ParamSpace);
}

isl::union_map ZoneAlgorithm::makeEmptyUnionMap() const {
  return isl::union_map::empty(ParamSpace);
}

void ZoneAlgorithm::collectCompatibleElts() {
  // First find all the incompatible elements, then take the complement.
  // We compile the list of compatible (rather than incompatible) elements so
  // users can intersect with the list, not requiring a subtract operation. It
  // also allows us to define a 'universe' of all elements and makes it more
  // explicit in which array elements can be used.
  isl::union_set AllElts = makeEmptyUnionSet();
  isl::union_set IncompatibleElts = makeEmptyUnionSet();

  for (auto &Stmt : *S)
    collectIncompatibleElts(&Stmt, IncompatibleElts, AllElts);

  NumIncompatibleArrays += isl_union_set_n_set(IncompatibleElts.get());
  CompatibleElts = AllElts.subtract(IncompatibleElts);
  NumCompatibleArrays += isl_union_set_n_set(CompatibleElts.get());
}

// { Domain[] -> Scatter[] }: the schedule restricted to @p Stmt's space.
isl::map ZoneAlgorithm::getScatterFor(ScopStmt *Stmt) const {
  isl::space ResultSpace =
      Stmt->getDomainSpace().map_from_domain_and_range(ScatterSpace);
  return Schedule.extract_map(ResultSpace);
}

isl::map ZoneAlgorithm::getScatterFor(MemoryAccess *MA) const {
  return getScatterFor(MA->getStatement());
}

isl::union_map ZoneAlgorithm::getScatterFor(isl::union_set Domain) const {
  return Schedule.intersect_domain(Domain);
}

isl::map ZoneAlgorithm::getScatterFor(isl::set Domain) const {
  auto ResultSpace = Domain.get_space().map_from_domain_and_range(ScatterSpace);
  auto UDomain = isl::union_set(Domain);
  auto UResult = getScatterFor(std::move(UDomain));
  auto Result = singleton(std::move(UResult), std::move(ResultSpace));
  assert(!Result || Result.domain().is_equal(Domain) == isl_bool_true);
  return Result;
}

isl::set ZoneAlgorithm::getDomainFor(ScopStmt *Stmt) const {
  return Stmt->getDomain().remove_redundancies();
}

isl::set ZoneAlgorithm::getDomainFor(MemoryAccess *MA) const {
  return getDomainFor(MA->getStatement());
}

// { Domain[] -> Element[] }: the access relation restricted to the executed
// statement instances.
isl::map ZoneAlgorithm::getAccessRelationFor(MemoryAccess *MA) const {
  auto Domain = getDomainFor(MA);
  auto AccRel = MA->getLatestAccessRelation();
  return AccRel.intersect_domain(Domain);
}

// { DomainDef[] -> DomainTarget[] }: which instance of @p TargetStmt uses the
// value defined by each instance of @p DefStmt. Results are cached in
// DefToTargetCache.
isl::map ZoneAlgorithm::getDefToTarget(ScopStmt *DefStmt,
                                       ScopStmt *TargetStmt) {
  // No translation required if the definition is already at the target.
  if (TargetStmt == DefStmt)
    return isl::map::identity(
        getDomainFor(TargetStmt).get_space().map_from_set());

  isl::map &Result = DefToTargetCache[std::make_pair(TargetStmt, DefStmt)];

  // This is a shortcut in case the schedule is still the original and
  // TargetStmt is in the same or nested inside DefStmt's loop. With the
  // additional assumption that operand trees do not cross DefStmt's loop
  // header, then TargetStmt's instance shared coordinates are the same as
  // DefStmt's coordinates. All TargetStmt instances with this prefix share
  // the same DefStmt instance.
  // Model:
  //
  // for (int i = 0; i < N; i += 1) {
  // DefStmt:
  //    D = ...;
  //    for (int j = 0; j < N; j += 1) {
  // TargetStmt:
  //      use(D);
  //    }
  // }
  //
  // Here, the value used in TargetStmt is defined in the corresponding
  // DefStmt, i.e.
  //
  //   { DefStmt[i] -> TargetStmt[i,j] }
  //
  // In practice, this should cover the majority of cases.
  if (!Result && S->isOriginalSchedule() &&
      isInsideLoop(DefStmt->getSurroundingLoop(),
                   TargetStmt->getSurroundingLoop())) {
    isl::set DefDomain = getDomainFor(DefStmt);
    isl::set TargetDomain = getDomainFor(TargetStmt);
    assert(DefDomain.dim(isl::dim::set) <= TargetDomain.dim(isl::dim::set));

    // Equate the shared outer loop dimensions.
    Result = isl::map::from_domain_and_range(DefDomain, TargetDomain);
    for (unsigned i = 0, DefDims = DefDomain.dim(isl::dim::set); i < DefDims;
         i += 1)
      Result = Result.equate(isl::dim::in, i, isl::dim::out, i);
  }

  if (!Result) {
    // Fall back to the full reaching-definition computation.
    // { DomainDef[] -> DomainTarget[] }
    Result = computeUseToDefFlowDependency(TargetStmt, DefStmt).reverse();
    simplify(Result);
  }

  return Result;
}

// { Scatter[] -> DomainDef[] }: cached per-statement scalar reaching
// definition (definition timepoint excluded, overwrite included).
isl::map ZoneAlgorithm::getScalarReachingDefinition(ScopStmt *Stmt) {
  auto &Result = ScalarReachDefZone[Stmt];
  if (Result)
    return Result;

  auto Domain = getDomainFor(Stmt);
  Result = computeScalarReachingDefinition(Schedule, Domain, false, true);
  simplify(Result);

  return Result;
}

isl::map ZoneAlgorithm::getScalarReachingDefinition(isl::set DomainDef) {
  // The ScopStmt is recoverable from the domain's tuple id.
  auto DomId = DomainDef.get_tuple_id();
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(DomId.get()));

  auto StmtResult = getScalarReachingDefinition(Stmt);

  return StmtResult.intersect_range(DomainDef);
}

isl::map ZoneAlgorithm::makeUnknownForDomain(ScopStmt *Stmt) const {
  return ::makeUnknownForDomain(getDomainFor(Stmt));
}

// Return (and cache) the isl_id representing @p V; its user pointer is the
// llvm::Value itself.
isl::id ZoneAlgorithm::makeValueId(Value *V) {
  if (!V)
    return nullptr;

  auto &Id = ValueIds[V];
  if (Id.is_null()) {
    auto Name = getIslCompatibleName("Val_", V, ValueIds.size() - 1,
                                     std::string(), UseInstructionNames);
    Id = isl::id::alloc(IslCtx.get(), Name.c_str(), V);
  }
  return Id;
}

// Zero-dimensional Value[] space carrying @p V's id.
isl::space ZoneAlgorithm::makeValueSpace(Value *V) {
  auto Result = ParamSpace.set_from_params();
  return Result.set_tuple_id(isl::dim::set, makeValueId(V));
}

isl::set ZoneAlgorithm::makeValueSet(Value *V) {
  auto Space = makeValueSpace(V);
  return isl::set::universe(Space);
}

// Create the ValInst map for @p Val used in @p UserStmt at @p Scope; see the
// file header for the ValInst[] encodings produced per VirtualUse kind.
isl::map ZoneAlgorithm::makeValInst(Value *Val, ScopStmt *UserStmt, Loop *Scope,
                                    bool IsCertain) {
  // If the definition/write is conditional, the value at the location could
  // be either the written value or the old value. Since we cannot know which
  // one, consider the value to be unknown.
  if (!IsCertain)
    return makeUnknownForDomain(UserStmt);

  auto DomainUse = getDomainFor(UserStmt);
  auto VUse = VirtualUse::create(S, UserStmt, Scope, Val, true);
  switch (VUse.getKind()) {
  case VirtualUse::Constant:
  case VirtualUse::Block:
  case VirtualUse::Hoisted:
  case VirtualUse::ReadOnly: {
    // The definition does not depend on the statement which uses it.
    auto ValSet = makeValueSet(Val);
    return isl::map::from_domain_and_range(DomainUse, ValSet);
  }

  case VirtualUse::Synthesizable: {
    auto *ScevExpr = VUse.getScevExpr();
    auto UseDomainSpace = DomainUse.get_space();

    // Construct the SCEV space.
    // TODO: Add only the induction variables referenced in SCEVAddRecExpr
    // expressions, not just all of them.
    auto ScevId = isl::manage(isl_id_alloc(
        UseDomainSpace.get_ctx().get(), nullptr, const_cast<SCEV *>(ScevExpr)));

    auto ScevSpace = UseDomainSpace.drop_dims(isl::dim::set, 0, 0);
    ScevSpace = ScevSpace.set_tuple_id(isl::dim::set, ScevId);

    // { DomainUse[] -> ScevExpr[] }
    auto ValInst =
        isl::map::identity(UseDomainSpace.map_from_domain_and_range(ScevSpace));
    return ValInst;
  }

  case VirtualUse::Intra: {
    // Definition and use are in the same statement. We do not need to compute
    // a reaching definition.

    // { llvm::Value }
    auto ValSet = makeValueSet(Val);

    // { UserDomain[] -> llvm::Value }
    auto ValInstSet = isl::map::from_domain_and_range(DomainUse, ValSet);

    // { UserDomain[] -> [UserDomain[] -> llvm::Value] }
    auto Result = ValInstSet.domain_map().reverse();
    simplify(Result);
    return Result;
  }

  case VirtualUse::Inter: {
    // The value is defined in a different statement.

    auto *Inst = cast<Instruction>(Val);
    auto *ValStmt = S->getStmtFor(Inst);

    // If the llvm::Value is defined in a removed Stmt, we cannot derive its
    // domain. We could use an arbitrary statement, but this could result in
    // different ValInst[] for the same llvm::Value.
    if (!ValStmt)
      return ::makeUnknownForDomain(DomainUse);

    // { DomainUse[] -> DomainDef[] }
    auto UsedInstance = getDefToTarget(ValStmt, UserStmt).reverse();

    // { llvm::Value }
    auto ValSet = makeValueSet(Val);

    // { DomainUse[] -> llvm::Value[] }
    auto ValInstSet = isl::map::from_domain_and_range(DomainUse, ValSet);

    // { DomainUse[] -> [DomainDef[] -> llvm::Value] }
    auto Result = UsedInstance.range_product(ValInstSet);

    simplify(Result);
    return Result;
  }
  }
  llvm_unreachable("Unhandled use type");
}

/// Remove all computed PHIs out of @p Input and replace by their incoming
/// value.
///
/// @param Input        { [] -> ValInst[] }
/// @param ComputedPHIs Set of PHIs that are replaced. Its ValInst must appear
///                     on the LHS of @p NormalizeMap.
/// @param NormalizeMap { ValInst[] -> ValInst[] }
static isl::union_map normalizeValInst(isl::union_map Input,
                                       const DenseSet<PHINode *> &ComputedPHIs,
                                       isl::union_map NormalizeMap) {
  isl::union_map Result = isl::union_map::empty(Input.get_space());
  for (isl::map Map : Input.get_map_list()) {
    isl::space Space = Map.get_space();
    isl::space RangeSpace = Space.range();

    // Instructions within the SCoP are always wrapped. Non-wrapped tuples
    // are therefore invariant in the SCoP and don't need normalization.
    if (!RangeSpace.is_wrapping()) {
      Result = Result.add_map(Map);
      continue;
    }

    // The llvm::Value is stashed in the user pointer of the wrapped range's
    // output tuple id; it is a PHINode only if normalization may apply.
    auto *PHI = dyn_cast<PHINode>(static_cast<Value *>(
        RangeSpace.unwrap().get_tuple_id(isl::dim::out).get_user()));

    // If no normalization is necessary, then the ValInst stands for itself.
    if (!ComputedPHIs.count(PHI)) {
      Result = Result.add_map(Map);
      continue;
    }

    // Otherwise, apply the normalization.
    isl::union_map Mapped = isl::union_map(Map).apply_range(NormalizeMap);
    Result = Result.unite(Mapped);
    NumPHINormialization++;
  }
  return Result;
}

// Like makeValInst, but additionally replaces PHI ValInsts by their incoming
// values using the normalization computed by computeNormalizedPHIs(). Before
// that computation runs, NormalizeMap is empty and this is a plain
// makeValInst.
isl::union_map ZoneAlgorithm::makeNormalizedValInst(llvm::Value *Val,
                                                    ScopStmt *UserStmt,
                                                    llvm::Loop *Scope,
                                                    bool IsCertain) {
  isl::map ValInst = makeValInst(Val, UserStmt, Scope, IsCertain);
  isl::union_map Normalized =
      normalizeValInst(ValInst, ComputedPHIs, NormalizeMap);
  return Normalized;
}

// An access is compatible for the zone analysis if it is an array-kind access
// performed by a plain load or store instruction.
bool ZoneAlgorithm::isCompatibleAccess(MemoryAccess *MA) {
  if (!MA)
    return false;
  if (!MA->isLatestArrayKind())
    return false;
  Instruction *AccInst = MA->getAccessInstruction();
  return isa<StoreInst>(AccInst) || isa<LoadInst>(AccInst);
}

// Determine whether the PHI read access @p MA can be replaced (normalized) by
// its unique incoming values.
bool ZoneAlgorithm::isNormalizable(MemoryAccess *MA) {
  assert(MA->isRead());

  // Exclude ExitPHIs, we are assuming that a normalizable PHI has a READ
  // MemoryAccess.
902 if (!MA->isOriginalPHIKind()) 903 return false; 904 905 // Exclude recursive PHIs, normalizing them would require a transitive 906 // closure. 907 auto *PHI = cast<PHINode>(MA->getAccessInstruction()); 908 if (RecursivePHIs.count(PHI)) 909 return false; 910 911 // Ensure that each incoming value can be represented by a ValInst[]. 912 // We do represent values from statements associated to multiple incoming 913 // value by the PHI itself, but we do not handle this case yet (especially 914 // isNormalized()) when normalizing. 915 const ScopArrayInfo *SAI = MA->getOriginalScopArrayInfo(); 916 auto Incomings = S->getPHIIncomings(SAI); 917 for (MemoryAccess *Incoming : Incomings) { 918 if (Incoming->getIncoming().size() != 1) 919 return false; 920 } 921 922 return true; 923 } 924 925 isl::boolean ZoneAlgorithm::isNormalized(isl::map Map) { 926 isl::space Space = Map.get_space(); 927 isl::space RangeSpace = Space.range(); 928 929 isl::boolean IsWrapping = RangeSpace.is_wrapping(); 930 if (!IsWrapping.is_true()) 931 return !IsWrapping; 932 isl::space Unwrapped = RangeSpace.unwrap(); 933 934 isl::id OutTupleId = Unwrapped.get_tuple_id(isl::dim::out); 935 if (OutTupleId.is_null()) 936 return isl::boolean(); 937 auto *PHI = dyn_cast<PHINode>(static_cast<Value *>(OutTupleId.get_user())); 938 if (!PHI) 939 return true; 940 941 isl::id InTupleId = Unwrapped.get_tuple_id(isl::dim::in); 942 if (OutTupleId.is_null()) 943 return isl::boolean(); 944 auto *IncomingStmt = static_cast<ScopStmt *>(InTupleId.get_user()); 945 MemoryAccess *PHIRead = IncomingStmt->lookupPHIReadOf(PHI); 946 if (!isNormalizable(PHIRead)) 947 return true; 948 949 return false; 950 } 951 952 isl::boolean ZoneAlgorithm::isNormalized(isl::union_map UMap) { 953 isl::boolean Result = true; 954 for (isl::map Map : UMap.get_map_list()) { 955 Result = isNormalized(Map); 956 if (Result.is_true()) 957 continue; 958 break; 959 } 960 return Result; 961 } 962 963 void ZoneAlgorithm::computeCommon() { 964 AllReads = 
      makeEmptyUnionMap();
  AllMayWrites = makeEmptyUnionMap();
  AllMustWrites = makeEmptyUnionMap();
  AllWriteValInst = makeEmptyUnionMap();
  AllReadValInst = makeEmptyUnionMap();

  // Default to empty, i.e. no normalization/replacement is taking place. Call
  // computeNormalizedPHIs() to initialize.
  NormalizeMap = makeEmptyUnionMap();
  ComputedPHIs.clear();

  // Accumulate the read and write relations of every array-kind access in the
  // SCoP into the All* maps initialized above.
  for (auto &Stmt : *S) {
    for (auto *MA : Stmt) {
      if (!MA->isLatestArrayKind())
        continue;

      if (MA->isRead())
        addArrayReadAccess(MA);

      if (MA->isWrite())
        addArrayWriteAccess(MA);
    }
  }

  // { DomainWrite[] -> Element[] }
  AllWrites = AllMustWrites.unite(AllMayWrites);

  // { [Element[] -> Zone[]] -> DomainWrite[] }
  WriteReachDefZone =
      computeReachingDefinition(Schedule, AllWrites, false, true);
  simplify(WriteReachDefZone);
}

void ZoneAlgorithm::computeNormalizedPHIs() {
  // Determine which PHIs can reference themselves. They are excluded from
  // normalization to avoid problems with transitive closures.
  for (ScopStmt &Stmt : *S) {
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isPHIKind())
        continue;
      if (!MA->isRead())
        continue;

      // TODO: Can be more efficient since isRecursivePHI can theoretically
      // determine recursiveness for multiple values and/or cache results.
      auto *PHI = cast<PHINode>(MA->getAccessInstruction());
      if (isRecursivePHI(PHI)) {
        NumRecursivePHIs++;
        RecursivePHIs.insert(PHI);
      }
    }
  }

  // { PHIValInst[] -> IncomingValInst[] }
  isl::union_map AllPHIMaps = makeEmptyUnionMap();

  // Discover new PHIs and try to normalize them.
  DenseSet<PHINode *> AllPHIs;
  for (ScopStmt &Stmt : *S) {
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isOriginalPHIKind())
        continue;
      if (!MA->isRead())
        continue;
      if (!isNormalizable(MA))
        continue;

      auto *PHI = cast<PHINode>(MA->getAccessInstruction());
      const ScopArrayInfo *SAI = MA->getOriginalScopArrayInfo();

      // Determine which instance of the PHI statement corresponds to which
      // incoming value. Skip if we cannot determine PHI predecessors.
      // { PHIDomain[] -> IncomingDomain[] }
      isl::union_map PerPHI = computePerPHI(SAI);
      if (!PerPHI)
        continue;

      // { PHIDomain[] -> PHIValInst[] }
      isl::map PHIValInst = makeValInst(PHI, &Stmt, Stmt.getSurroundingLoop());

      // { IncomingDomain[] -> IncomingValInst[] }
      isl::union_map IncomingValInsts = makeEmptyUnionMap();

      // Get all incoming values.
      for (MemoryAccess *MA : S->getPHIIncomings(SAI)) {
        ScopStmt *IncomingStmt = MA->getStatement();

        // isNormalizable() guaranteed a single incoming value per access.
        auto Incoming = MA->getIncoming();
        assert(Incoming.size() == 1 && "The incoming value must be "
                                       "representable by something else than "
                                       "the PHI itself");
        Value *IncomingVal = Incoming[0].second;

        // { IncomingDomain[] -> IncomingValInst[] }
        isl::map IncomingValInst = makeValInst(
            IncomingVal, IncomingStmt, IncomingStmt->getSurroundingLoop());

        IncomingValInsts = IncomingValInsts.add_map(IncomingValInst);
      }

      // { PHIValInst[] -> IncomingValInst[] }
      isl::union_map PHIMap =
          PerPHI.apply_domain(PHIValInst).apply_range(IncomingValInsts);
      assert(!PHIMap.is_single_valued().is_false());

      // Resolve transitiveness: The incoming value of the newly discovered PHI
      // may reference a previously normalized PHI. At the same time, already
      // normalized PHIs might be normalized to the new PHI. At the end, none of
      // the PHIs may appear on the right-hand-side of the normalization map.
      PHIMap = normalizeValInst(PHIMap, AllPHIs, AllPHIMaps);
      AllPHIs.insert(PHI);
      AllPHIMaps = normalizeValInst(AllPHIMaps, AllPHIs, PHIMap);

      AllPHIMaps = AllPHIMaps.unite(PHIMap);
      NumNormalizablePHIs++;
    }
  }
  simplify(AllPHIMaps);

  // Apply the normalization.
  ComputedPHIs = AllPHIs;
  NormalizeMap = AllPHIMaps;

  assert(!NormalizeMap || isNormalized(NormalizeMap));
}

// Dump all statements with their memory accesses, for debugging output.
void ZoneAlgorithm::printAccesses(llvm::raw_ostream &OS, int Indent) const {
  OS.indent(Indent) << "After accesses {\n";
  for (auto &Stmt : *S) {
    OS.indent(Indent + 4) << Stmt.getBaseName() << "\n";
    for (auto *MA : Stmt)
      MA->print(OS);
  }
  OS.indent(Indent) << "}\n";
}

// Derive the known content of array elements from the values written by
// reaching MUST-write definitions.
isl::union_map ZoneAlgorithm::computeKnownFromMustWrites() const {
  // { [Element[] -> Zone[]] -> [Element[] -> DomainWrite[]] }
  isl::union_map EltReachdDef = distributeDomain(WriteReachDefZone.curry());

  // { [Element[] -> DomainWrite[]] -> ValInst[] }
  isl::union_map AllKnownWriteValInst = filterKnownValInst(AllWriteValInst);

  // { [Element[] -> Zone[]] -> ValInst[] }
  return EltReachdDef.apply_range(AllKnownWriteValInst);
}

// Derive the known content of array elements from the values observed by
// loads: any read within the same reaching definition sees the same value.
isl::union_map ZoneAlgorithm::computeKnownFromLoad() const {
  // { Element[] }
  isl::union_set AllAccessedElts = AllReads.range().unite(AllWrites.range());

  // { Element[] -> Scatter[] }
  isl::union_map EltZoneUniverse = isl::union_map::from_domain_and_range(
      AllAccessedElts, isl::set::universe(ScatterSpace));

  // This assumes there are no "holes" in
  // isl_union_map_domain(WriteReachDefZone); alternatively, compute the zone
  // before the first write or that are not written at all.
1122 // { Element[] -> Scatter[] } 1123 isl::union_set NonReachDef = 1124 EltZoneUniverse.wrap().subtract(WriteReachDefZone.domain()); 1125 1126 // { [Element[] -> Zone[]] -> ReachDefId[] } 1127 isl::union_map DefZone = 1128 WriteReachDefZone.unite(isl::union_map::from_domain(NonReachDef)); 1129 1130 // { [Element[] -> Scatter[]] -> Element[] } 1131 isl::union_map EltZoneElt = EltZoneUniverse.domain_map(); 1132 1133 // { [Element[] -> Zone[]] -> [Element[] -> ReachDefId[]] } 1134 isl::union_map DefZoneEltDefId = EltZoneElt.range_product(DefZone); 1135 1136 // { Element[] -> [Zone[] -> ReachDefId[]] } 1137 isl::union_map EltDefZone = DefZone.curry(); 1138 1139 // { [Element[] -> Zone[] -> [Element[] -> ReachDefId[]] } 1140 isl::union_map EltZoneEltDefid = distributeDomain(EltDefZone); 1141 1142 // { [Element[] -> Scatter[]] -> DomainRead[] } 1143 isl::union_map Reads = AllReads.range_product(Schedule).reverse(); 1144 1145 // { [Element[] -> Scatter[]] -> [Element[] -> DomainRead[]] } 1146 isl::union_map ReadsElt = EltZoneElt.range_product(Reads); 1147 1148 // { [Element[] -> Scatter[]] -> ValInst[] } 1149 isl::union_map ScatterKnown = ReadsElt.apply_range(AllReadValInst); 1150 1151 // { [Element[] -> ReachDefId[]] -> ValInst[] } 1152 isl::union_map DefidKnown = 1153 DefZoneEltDefId.apply_domain(ScatterKnown).reverse(); 1154 1155 // { [Element[] -> Zone[]] -> ValInst[] } 1156 return DefZoneEltDefId.apply_range(DefidKnown); 1157 } 1158 1159 isl::union_map ZoneAlgorithm::computeKnown(bool FromWrite, 1160 bool FromRead) const { 1161 isl::union_map Result = makeEmptyUnionMap(); 1162 1163 if (FromWrite) 1164 Result = Result.unite(computeKnownFromMustWrites()); 1165 1166 if (FromRead) 1167 Result = Result.unite(computeKnownFromLoad()); 1168 1169 simplify(Result); 1170 return Result; 1171 } 1172