//===------ ZoneAlgo.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Derive information about array elements between statements ("Zones").
//
// The algorithms here work on the scatter space - the image space of the
// schedule returned by Scop::getSchedule(). We call an element in that space a
// "timepoint". Timepoints are lexicographically ordered such that we can
// define ranges in the scatter space. We use two flavors of such ranges:
// Timepoint sets and zones. A timepoint set is simply a subset of the scatter
// space and is directly stored as an isl_set.
//
// Zones are used to describe the space between timepoints as open sets, i.e.
// they do not contain the extrema. Using isl rational sets to express these
// would be overkill. We also cannot store them as the integer timepoints they
// contain; the (nonempty) zone between 1 and 2 would be empty and
// indistinguishable from e.g. the zone between 3 and 4. Also, we cannot store
// the integer set including the extrema; the set ]1,2[ + ]3,4[ could be
// coalesced to ]1,3[, although we defined the range [2,3] not to be in the
// set. Instead, we store the "half-open" integer extrema, including the lower
// bound, but excluding the upper bound. Examples:
//
// * The set { [i] : 1 <= i <= 3 } represents the zone ]0,3[ (which contains the
//   integer points 1 and 2, but not 0 or 3)
//
// * { [1] } represents the zone ]0,1[
//
// * { [i] : i = 1 or i = 3 } represents the zone ]0,1[ + ]2,3[
//
// Therefore, an integer i in the set represents the zone ]i-1,i[, i.e. strictly
// speaking the integer points never belong to the zone. However, depending on
// the interpretation, one might want to include them. Part of the
// interpretation may not be known when the zone is constructed.
//
// Reads are assumed to always take place before writes, hence we can think of
// reads taking place at the beginning of a timepoint and writes at the end.
//
// Let's assume that the zone represents the lifetime of a variable. That is,
// the zone begins with a write that defines the value during its lifetime and
// ends with the last read of that value. In the following we consider whether a
// read/write at the beginning/end of the lifetime zone should be within the
// zone or outside of it.
//
// * A read at the timepoint that starts the live-range loads the previous
//   value. Hence, exclude the timepoint starting the zone.
//
// * For a write at the timepoint that starts the live-range, it is undefined
//   whether it occurs before or after the write that starts the lifetime. We
//   do not allow this situation to occur. Hence, we include the timepoint
//   starting the zone when determining whether two writes are conflicting.
//
// * A read at the timepoint that ends the live-range reads the same variable.
//   We include the timepoint at the end of the zone to include that read into
//   the live-range. Doing otherwise would mean that the two reads access
//   different values, which would mean that the values they read are both
//   alive at the same time but occupy the same variable.
//
// * A write at the timepoint that ends the live-range starts a new live-range.
//   It must not be included in the live-range of the previous definition.
//
// All combinations of reads and writes at the endpoints are possible, but most
// of the time only the write->read (for instance, a live-range from definition
// to last use) and read->write (for instance, an unused range from last use to
// overwrite) combinations are interesting (half-open ranges). write->write
// zones might be useful as well in some contexts to represent output
// dependencies.
//
// @see convertZoneToTimepoints
//
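// As a small worked example (with hypothetical timepoints): assume a variable
// is written at timepoint [0] and last read at timepoint [3]. Its lifetime can
// be stored as the zone { [i] : 1 <= i <= 3 }. Depending on which endpoints
// the interpretation includes, convertZoneToTimepoints() can translate this
// zone back into timepoint sets: including the zones' start points yields
// { [i] : 0 <= i <= 2 } (which contains the defining write at 0), while
// including their end points yields { [i] : 1 <= i <= 3 } (which contains the
// last read at 3).
//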
//
// The code makes use of maps and sets in many different spaces. To not lose
// track of which space a set or map is expected to be in, variables holding an
// isl reference are usually annotated in the comments. They roughly follow isl
// syntax for spaces, but only the tuples, not the dimensions. The tuples have a
// meaning as follows:
//
// * Space[] - An unspecified tuple. Used for function parameters such that the
//             function caller can use it for anything they like.
//
// * Domain[] - A statement instance as returned by ScopStmt::getDomain()
//              isl_id_get_name: Stmt_<NameOfBasicBlock>
//              isl_id_get_user: Pointer to ScopStmt
//
// * Element[] - An array element as in the range part of
//               MemoryAccess::getAccessRelation()
//               isl_id_get_name: MemRef_<NameOfArrayVariable>
//               isl_id_get_user: Pointer to ScopArrayInfo
//
// * Scatter[] - Scatter space or space of timepoints
//               Has no tuple id
//
// * Zone[] - Range between timepoints as described above
//            Has no tuple id
//
// * ValInst[] - An llvm::Value as defined at a specific timepoint.
//
//   A ValInst[] itself can be structured as one of:
//
//   * [] - An unknown value.
//          Always zero dimensions
//          Has no tuple id
//
//   * Value[] - An llvm::Value that is read-only in the SCoP, i.e. its
//               runtime content does not depend on the timepoint.
//               Always zero dimensions
//               isl_id_get_name: Val_<NameOfValue>
//               isl_id_get_user: A pointer to an llvm::Value
//
//   * SCEV[...] - A synthesizable llvm::SCEV expression.
//                 In contrast to a Value[], it has at least one dimension per
//                 SCEVAddRecExpr in the SCEV.
//
//   * [Domain[] -> Value[]] - An llvm::Value that may change during the
//                             Scop's execution.
//                             The tuple itself has no id, but it wraps a map
//                             space holding a statement instance which defines
//                             the llvm::Value as the map's domain and the
//                             llvm::Value itself as its range.
//
// @see makeValInst()
//
// An annotation "{ Domain[] -> Scatter[] }" therefore means: A map from a
// statement instance to a timepoint, aka a schedule. There is only one scatter
// space, but most of the time multiple statements are processed in one set.
// This is why most of the time isl_union_map has to be used.
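//
// For illustration (with hypothetical statement and value names): a value %x
// computed inside the statement Stmt_body could be annotated as the
// { Domain[] -> ValInst[] } map
//
//   { Stmt_body[i] -> [Stmt_body[i] -> Val_x[]] }
//
// whereas a value %n that is read-only in the SCoP would simply be annotated
// as { Stmt_body[i] -> Val_n[] }.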
//
// The basic algorithm works as follows:
// At first we verify that the SCoP is compatible with this technique. For
// instance, two writes cannot write to the same location at the same statement
// instance because we cannot determine within the polyhedral model which one
// comes first. Once this is verified, we compute zones at which an array
// element is unused. This computation can fail if it takes too long. Then the
// main algorithm is executed. Because every store potentially trails an unused
// zone, we start at stores. We search for a scalar (MemoryKind::Value or
// MemoryKind::PHI) that we can map to the array element overwritten by the
// store, preferably one that is used by the store or at least the ScopStmt.
// When it does not conflict with the lifetime of the values in the array
// element, the map is applied and the unused zone updated as it is now used. We
// continue to try to map scalars to the array element until there are no more
// candidates to map. The algorithm is greedy in the sense that the first scalar
// that does not conflict will be mapped. Other scalars processed later that
// could have fit the same unused zone will be rejected. As such, the result
// depends on the processing order.
//
//===----------------------------------------------------------------------===//

#include "polly/ZoneAlgo.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLTools.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "polly-zone"

STATISTIC(NumIncompatibleArrays, "Number of not zone-analyzable arrays");
STATISTIC(NumCompatibleArrays, "Number of zone-analyzable arrays");
STATISTIC(NumRecursivePHIs, "Number of recursive PHIs");
STATISTIC(NumNormalizablePHIs, "Number of normalizable PHIs");
STATISTIC(NumPHINormialization, "Number of executed PHI normalizations");

using namespace polly;
using namespace llvm;

static isl::union_map computeReachingDefinition(isl::union_map Schedule,
                                                isl::union_map Writes,
                                                bool InclDef, bool InclRedef) {
  return computeReachingWrite(Schedule, Writes, false, InclDef, InclRedef);
}

/// Compute the reaching definition of a scalar.
///
/// Compared to computeReachingDefinition, there is just one element which is
/// accessed and therefore only a set of instances that access that element is
/// required.
///
/// @param Schedule  { DomainWrite[] -> Scatter[] }
/// @param Writes    { DomainWrite[] }
/// @param InclDef   Include the timepoint of the definition in the result.
/// @param InclRedef Include the timepoint of the overwrite in the result.
///
/// @return { Scatter[] -> DomainWrite[] }
static isl::union_map computeScalarReachingDefinition(isl::union_map Schedule,
                                                      isl::union_set Writes,
                                                      bool InclDef,
                                                      bool InclRedef) {
  // { DomainWrite[] -> Element[] }
  isl::union_map Defs = isl::union_map::from_domain(Writes);

  // { [Element[] -> Scatter[]] -> DomainWrite[] }
  auto ReachDefs =
      computeReachingDefinition(Schedule, Defs, InclDef, InclRedef);

  // { Scatter[] -> DomainWrite[] }
  return ReachDefs.curry().range().unwrap();
}
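
// For illustration (hypothetical writes): given the schedule
// { Write[i] -> [i] } and the write set { Write[0]; Write[10] }, the scalar
// reaching definition with InclDef = false and InclRedef = true would be
//
//   { [i] -> Write[0] : 1 <= i <= 10; [i] -> Write[10] : i >= 11 }
//
// i.e. every timepoint is mapped to the last write scheduled before it.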

/// Compute the reaching definition of a scalar.
///
/// This overload accepts only a single writing statement as an isl_map,
/// consequently the result is also a single isl_map.
///
/// @param Schedule  { DomainWrite[] -> Scatter[] }
/// @param Writes    { DomainWrite[] }
/// @param InclDef   Include the timepoint of the definition in the result.
/// @param InclRedef Include the timepoint of the overwrite in the result.
///
/// @return { Scatter[] -> DomainWrite[] }
static isl::map computeScalarReachingDefinition(isl::union_map Schedule,
                                                isl::set Writes, bool InclDef,
                                                bool InclRedef) {
  isl::space DomainSpace = Writes.get_space();
  isl::space ScatterSpace = getScatterSpace(Schedule);

  // { Scatter[] -> DomainWrite[] }
  isl::union_map UMap = computeScalarReachingDefinition(
      Schedule, isl::union_set(Writes), InclDef, InclRedef);

  isl::space ResultSpace = ScatterSpace.map_from_domain_and_range(DomainSpace);
  return singleton(UMap, ResultSpace);
}

isl::union_map polly::makeUnknownForDomain(isl::union_set Domain) {
  return isl::union_map::from_domain(Domain);
}

/// Create a domain-to-unknown value mapping.
///
/// @see makeUnknownForDomain(isl::union_set)
///
/// @param Domain { Domain[] }
///
/// @return { Domain[] -> ValInst[] }
static isl::map makeUnknownForDomain(isl::set Domain) {
  return isl::map::from_domain(Domain);
}

/// Return whether @p Map maps to an unknown value.
///
/// @param Map { [] -> ValInst[] }
static bool isMapToUnknown(const isl::map &Map) {
  isl::space Space = Map.get_space().range();
  return Space.has_tuple_id(isl::dim::set).is_false() &&
         Space.is_wrapping().is_false() &&
         Space.dim(isl::dim::set).release() == 0;
}

isl::union_map polly::filterKnownValInst(const isl::union_map &UMap) {
  isl::union_map Result = isl::union_map::empty(UMap.ctx());
  for (isl::map Map : UMap.get_map_list()) {
    if (!isMapToUnknown(Map))
      Result = Result.unite(Map);
  }
  return Result;
}
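
// For illustration (hypothetical tuples): applied to
// { A[i] -> Val_x[]; B[i] -> [] }, filterKnownValInst() keeps only
// { A[i] -> Val_x[] }; the zero-dimensional, unnamed range [] denotes an
// unknown value and is dropped.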

ZoneAlgorithm::ZoneAlgorithm(const char *PassName, Scop *S, LoopInfo *LI)
    : PassName(PassName), IslCtx(S->getSharedIslCtx()), S(S), LI(LI),
      Schedule(S->getSchedule()) {
  auto Domains = S->getDomains();

  Schedule = Schedule.intersect_domain(Domains);
  ParamSpace = Schedule.get_space();
  ScatterSpace = getScatterSpace(Schedule);
}

/// Check if all stores in @p Stmt store the very same value.
///
/// This covers a special situation occurring in Polybench's
/// covariance/correlation (which is typical for algorithms that cover symmetric
/// matrices):
///
///   for (int i = 0; i < n; i += 1)
///     for (int j = 0; j <= i; j += 1) {
///       double x = ...;
///       C[i][j] = x;
///       C[j][i] = x;
///     }
///
/// For i == j, the same value is written twice to the same element. Double
/// writes to the same element are not allowed in DeLICM because its algorithm
/// does not see which of the writes is effective. But if it is the same value
/// anyway, it doesn't matter.
///
/// LLVM passes, however, cannot simplify this because the write is necessary
/// for i != j (unless it would add a condition for one of the writes to occur
/// only if i != j).
///
/// TODO: In the future we may want to extend this to make the checks
///       specific to different memory locations.
static bool onlySameValueWrites(ScopStmt *Stmt) {
  Value *V = nullptr;

  for (auto *MA : *Stmt) {
    if (!MA->isLatestArrayKind() || !MA->isMustWrite() ||
        !MA->isOriginalArrayKind())
      continue;

    if (!V) {
      V = MA->getAccessValue();
      continue;
    }

    if (V != MA->getAccessValue())
      return false;
  }
  return true;
}

/// Is @p InnerLoop nested inside @p OuterLoop?
static bool isInsideLoop(Loop *OuterLoop, Loop *InnerLoop) {
  // If OuterLoop is nullptr, we cannot call its contains() method. In this case
  // OuterLoop represents the 'top level' and therefore contains all loops.
  return !OuterLoop || OuterLoop->contains(InnerLoop);
}

void ZoneAlgorithm::collectIncompatibleElts(ScopStmt *Stmt,
                                            isl::union_set &IncompatibleElts,
                                            isl::union_set &AllElts) {
  auto Stores = makeEmptyUnionMap();
  auto Loads = makeEmptyUnionMap();

  // This assumes that the MemoryKind::Array MemoryAccesses are iterated in
  // order.
  for (auto *MA : *Stmt) {
    if (!MA->isOriginalArrayKind())
      continue;

    isl::map AccRelMap = getAccessRelationFor(MA);
    isl::union_map AccRel = AccRelMap;

    // To avoid solving any ILP problems, always add entire arrays instead of
    // just the elements that are accessed.
    auto ArrayElts = isl::set::universe(AccRelMap.get_space().range());
    AllElts = AllElts.unite(ArrayElts);

    if (MA->isRead()) {
      // Reject load after store to same location.
      if (!Stores.is_disjoint(AccRel)) {
        LLVM_DEBUG(
            dbgs() << "Load after store of same element in same statement\n");
        OptimizationRemarkMissed R(PassName, "LoadAfterStore",
                                   MA->getAccessInstruction());
        R << "load after store of same element in same statement";
        R << " (previous stores: " << Stores;
        R << ", loading: " << AccRel << ")";
        S->getFunction().getContext().diagnose(R);

        IncompatibleElts = IncompatibleElts.unite(ArrayElts);
      }

      Loads = Loads.unite(AccRel);

      continue;
    }

    // In region statements the order is less clear, e.g. the load and store
    // might be in a boxed loop.
    if (Stmt->isRegionStmt() && !Loads.is_disjoint(AccRel)) {
      LLVM_DEBUG(dbgs() << "WRITE in non-affine subregion not supported\n");
      OptimizationRemarkMissed R(PassName, "StoreInSubregion",
                                 MA->getAccessInstruction());
      R << "store is in a non-affine subregion";
      S->getFunction().getContext().diagnose(R);

      IncompatibleElts = IncompatibleElts.unite(ArrayElts);
    }

    // Do not allow more than one store to the same location.
    if (!Stores.is_disjoint(AccRel) && !onlySameValueWrites(Stmt)) {
      LLVM_DEBUG(dbgs() << "WRITE after WRITE to same element\n");
      OptimizationRemarkMissed R(PassName, "StoreAfterStore",
                                 MA->getAccessInstruction());
      R << "store after store of same element in same statement";
      R << " (previous stores: " << Stores;
      R << ", storing: " << AccRel << ")";
      S->getFunction().getContext().diagnose(R);

      IncompatibleElts = IncompatibleElts.unite(ArrayElts);
    }

    Stores = Stores.unite(AccRel);
  }
}

void ZoneAlgorithm::addArrayReadAccess(MemoryAccess *MA) {
  assert(MA->isLatestArrayKind());
  assert(MA->isRead());
  ScopStmt *Stmt = MA->getStatement();

  // { DomainRead[] -> Element[] }
  auto AccRel = intersectRange(getAccessRelationFor(MA), CompatibleElts);
  AllReads = AllReads.unite(AccRel);

  if (LoadInst *Load = dyn_cast_or_null<LoadInst>(MA->getAccessInstruction())) {
    // { DomainRead[] -> ValInst[] }
    isl::map LoadValInst = makeValInst(
        Load, Stmt, LI->getLoopFor(Load->getParent()), Stmt->isBlockStmt());

    // { DomainRead[] -> [Element[] -> DomainRead[]] }
    isl::map IncludeElement = AccRel.domain_map().curry();

    // { [Element[] -> DomainRead[]] -> ValInst[] }
    isl::map EltLoadValInst = LoadValInst.apply_domain(IncludeElement);

    AllReadValInst = AllReadValInst.unite(EltLoadValInst);
  }
}

isl::union_map ZoneAlgorithm::getWrittenValue(MemoryAccess *MA,
                                              isl::map AccRel) {
  if (!MA->isMustWrite())
    return {};

  Value *AccVal = MA->getAccessValue();
  ScopStmt *Stmt = MA->getStatement();
  Instruction *AccInst = MA->getAccessInstruction();

  // Write a value to a single element.
  auto L = MA->isOriginalArrayKind() ? LI->getLoopFor(AccInst->getParent())
                                     : Stmt->getSurroundingLoop();
  if (AccVal &&
      AccVal->getType() == MA->getLatestScopArrayInfo()->getElementType() &&
      AccRel.is_single_valued().is_true())
    return makeNormalizedValInst(AccVal, Stmt, L);

  // memset(_, '0', ) is equivalent to writing the null value to all touched
  // elements. isMustWrite() ensures that all of an element's bytes are
  // overwritten.
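  //
  // For illustration (hypothetical call): a memset(A, 0, 4 * sizeof(int)) on
  // an int array is treated as if it wrote Constant::getNullValue(i32) to each
  // of the four touched elements.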
  if (auto *Memset = dyn_cast<MemSetInst>(AccInst)) {
    auto *WrittenConstant = dyn_cast<Constant>(Memset->getValue());
    Type *Ty = MA->getLatestScopArrayInfo()->getElementType();
    if (WrittenConstant && WrittenConstant->isZeroValue()) {
      Constant *Zero = Constant::getNullValue(Ty);
      return makeNormalizedValInst(Zero, Stmt, L);
    }
  }

  return {};
}

void ZoneAlgorithm::addArrayWriteAccess(MemoryAccess *MA) {
  assert(MA->isLatestArrayKind());
  assert(MA->isWrite());
  auto *Stmt = MA->getStatement();

  // { Domain[] -> Element[] }
  isl::map AccRel = intersectRange(getAccessRelationFor(MA), CompatibleElts);

  if (MA->isMustWrite())
    AllMustWrites = AllMustWrites.unite(AccRel);

  if (MA->isMayWrite())
    AllMayWrites = AllMayWrites.unite(AccRel);

  // { Domain[] -> ValInst[] }
  isl::union_map WriteValInstance = getWrittenValue(MA, AccRel);
  if (WriteValInstance.is_null())
    WriteValInstance = makeUnknownForDomain(Stmt);

  // { Domain[] -> [Element[] -> Domain[]] }
  isl::map IncludeElement = AccRel.domain_map().curry();

  // { [Element[] -> DomainWrite[]] -> ValInst[] }
  isl::union_map EltWriteValInst =
      WriteValInstance.apply_domain(IncludeElement);

  AllWriteValInst = AllWriteValInst.unite(EltWriteValInst);
}

/// For an llvm::Value defined in @p DefStmt, compute the RAW dependency for a
/// use in every instance of @p UseStmt.
///
/// @param UseStmt Statement a scalar is used in.
/// @param DefStmt Statement a scalar is defined in.
///
/// @return { DomainUse[] -> DomainDef[] }
isl::map ZoneAlgorithm::computeUseToDefFlowDependency(ScopStmt *UseStmt,
                                                      ScopStmt *DefStmt) {
  // { DomainUse[] -> Scatter[] }
  isl::map UseScatter = getScatterFor(UseStmt);

  // { Zone[] -> DomainDef[] }
  isl::map ReachDefZone = getScalarReachingDefinition(DefStmt);

  // { Scatter[] -> DomainDef[] }
  isl::map ReachDefTimepoints =
      convertZoneToTimepoints(ReachDefZone, isl::dim::in, false, true);

  // { DomainUse[] -> DomainDef[] }
  return UseScatter.apply_range(ReachDefTimepoints);
}

/// Return whether @p PHI refers (also transitively through other PHIs) to
/// itself.
///
/// loop:
///   %phi1 = phi [0, %preheader], [%phi1, %loop]
///   br i1 %c, label %loop, label %exit
///
/// exit:
///   %phi2 = phi [%phi1, %loop]
///
/// In this example, %phi1 is recursive, but %phi2 is not.
static bool isRecursivePHI(const PHINode *PHI) {
  SmallVector<const PHINode *, 8> Worklist;
  SmallPtrSet<const PHINode *, 8> Visited;
  Worklist.push_back(PHI);

  while (!Worklist.empty()) {
    const PHINode *Cur = Worklist.pop_back_val();

    if (Visited.count(Cur))
      continue;
    Visited.insert(Cur);

    for (const Use &Incoming : Cur->incoming_values()) {
      Value *IncomingVal = Incoming.get();
      auto *IncomingPHI = dyn_cast<PHINode>(IncomingVal);
      if (!IncomingPHI)
        continue;

      if (IncomingPHI == PHI)
        return true;
      Worklist.push_back(IncomingPHI);
    }
  }
  return false;
}

isl::union_map ZoneAlgorithm::computePerPHI(const ScopArrayInfo *SAI) {
  // TODO: If the PHI has an incoming block from before the SCoP, it is not
  // represented in any ScopStmt.
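  //
  // For illustration (hypothetical statements): for a loop-header PHI with
  // incoming writes in the loop preheader and in the loop latch, the computed
  // map could be
  //
  //   { PHIRead[0] -> PreheaderWrite[];
  //     PHIRead[i] -> LatchWrite[i - 1] : i > 0 }
  //
  // i.e. each PHI read instance is mapped to the instance of the incoming
  // write that executed last before it.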

  auto *PHI = cast<PHINode>(SAI->getBasePtr());
  auto It = PerPHIMaps.find(PHI);
  if (It != PerPHIMaps.end())
    return It->second;

  // Cannot reliably compute immediate predecessor for undefined executions, so
  // bail out if we do not know. This in particular applies to undefined control
  // flow.
  isl::set DefinedContext = S->getDefinedBehaviorContext();
  if (DefinedContext.is_null())
    return {};

  assert(SAI->isPHIKind());

  // { DomainPHIWrite[] -> Scatter[] }
  isl::union_map PHIWriteScatter = makeEmptyUnionMap();

  // Collect all incoming block timepoints.
  for (MemoryAccess *MA : S->getPHIIncomings(SAI)) {
    isl::map Scatter = getScatterFor(MA);
    PHIWriteScatter = PHIWriteScatter.unite(Scatter);
  }

  // { DomainPHIRead[] -> Scatter[] }
  isl::map PHIReadScatter = getScatterFor(S->getPHIRead(SAI));

  // { DomainPHIRead[] -> Scatter[] }
  isl::map BeforeRead = beforeScatter(PHIReadScatter, true);

  // { Scatter[] }
  isl::set WriteTimes = singleton(PHIWriteScatter.range(), ScatterSpace);

  // { DomainPHIRead[] -> Scatter[] }
  isl::map PHIWriteTimes = BeforeRead.intersect_range(WriteTimes);

  // Remove instances outside the context.
  PHIWriteTimes = PHIWriteTimes.intersect_params(DefinedContext);

  isl::map LastPerPHIWrites = PHIWriteTimes.lexmax();

  // { DomainPHIRead[] -> DomainPHIWrite[] }
  isl::union_map Result =
      isl::union_map(LastPerPHIWrites).apply_range(PHIWriteScatter.reverse());
  assert(!Result.is_single_valued().is_false());
  assert(!Result.is_injective().is_false());

  PerPHIMaps.insert({PHI, Result});
  return Result;
}

isl::union_set ZoneAlgorithm::makeEmptyUnionSet() const {
  return isl::union_set::empty(ParamSpace.ctx());
}

isl::union_map ZoneAlgorithm::makeEmptyUnionMap() const {
  return isl::union_map::empty(ParamSpace.ctx());
}

void ZoneAlgorithm::collectCompatibleElts() {
  // First find all the incompatible elements, then take the complement.
  // We compile the list of compatible (rather than incompatible) elements so
  // users can intersect with the list, not requiring a subtract operation. It
  // also allows us to define a 'universe' of all elements and makes it more
  // explicit which array elements can be used.
  isl::union_set AllElts = makeEmptyUnionSet();
  isl::union_set IncompatibleElts = makeEmptyUnionSet();

  for (auto &Stmt : *S)
    collectIncompatibleElts(&Stmt, IncompatibleElts, AllElts);

  NumIncompatibleArrays += isl_union_set_n_set(IncompatibleElts.get());
  CompatibleElts = AllElts.subtract(IncompatibleElts);
  NumCompatibleArrays += isl_union_set_n_set(CompatibleElts.get());
}

isl::map ZoneAlgorithm::getScatterFor(ScopStmt *Stmt) const {
  isl::space ResultSpace =
      Stmt->getDomainSpace().map_from_domain_and_range(ScatterSpace);
  return Schedule.extract_map(ResultSpace);
}

isl::map ZoneAlgorithm::getScatterFor(MemoryAccess *MA) const {
  return getScatterFor(MA->getStatement());
}

isl::union_map ZoneAlgorithm::getScatterFor(isl::union_set Domain) const {
  return Schedule.intersect_domain(Domain);
}

isl::map ZoneAlgorithm::getScatterFor(isl::set Domain) const {
  auto ResultSpace = Domain.get_space().map_from_domain_and_range(ScatterSpace);
  auto UDomain = isl::union_set(Domain);
  auto UResult = getScatterFor(std::move(UDomain));
  auto Result = singleton(std::move(UResult), std::move(ResultSpace));
  assert(Result.is_null() || Result.domain().is_equal(Domain) == isl_bool_true);
  return Result;
}

isl::set ZoneAlgorithm::getDomainFor(ScopStmt *Stmt) const {
  return Stmt->getDomain().remove_redundancies();
}

isl::set ZoneAlgorithm::getDomainFor(MemoryAccess *MA) const {
  return getDomainFor(MA->getStatement());
}

isl::map ZoneAlgorithm::getAccessRelationFor(MemoryAccess *MA) const {
  auto Domain = getDomainFor(MA);
  auto AccRel = MA->getLatestAccessRelation();
  return AccRel.intersect_domain(Domain);
}

isl::map ZoneAlgorithm::getDefToTarget(ScopStmt *DefStmt,
                                       ScopStmt *TargetStmt) {
  // No translation required if the definition is already at the target.
  if (TargetStmt == DefStmt)
    return isl::map::identity(
        getDomainFor(TargetStmt).get_space().map_from_set());

  isl::map &Result = DefToTargetCache[std::make_pair(TargetStmt, DefStmt)];

  // This is a shortcut in case the schedule is still the original and
  // TargetStmt is in the same loop as DefStmt or nested inside it. Under the
  // additional assumption that operand trees do not cross DefStmt's loop
  // header, the coordinates a TargetStmt instance shares with DefStmt are the
  // coordinates of the DefStmt instance it uses. All TargetStmt instances with
  // this prefix share the same DefStmt instance.
  // Model:
  //
  //   for (int i = 0; i < N; i += 1) {
  // DefStmt:
  //     D = ...;
  //     for (int j = 0; j < N; j += 1) {
  // TargetStmt:
  //       use(D);
  //     }
  //   }
  //
  // Here, the value used in TargetStmt is defined in the corresponding
  // DefStmt, i.e.
  //
  //   { DefStmt[i] -> TargetStmt[i,j] }
  //
  // In practice, this should cover the majority of cases.
  if (Result.is_null() && S->isOriginalSchedule() &&
      isInsideLoop(DefStmt->getSurroundingLoop(),
                   TargetStmt->getSurroundingLoop())) {
    isl::set DefDomain = getDomainFor(DefStmt);
    isl::set TargetDomain = getDomainFor(TargetStmt);
    assert(unsignedFromIslSize(DefDomain.tuple_dim()) <=
           unsignedFromIslSize(TargetDomain.tuple_dim()));

    Result = isl::map::from_domain_and_range(DefDomain, TargetDomain);
    for (unsigned i : rangeIslSize(0, DefDomain.tuple_dim()))
      Result = Result.equate(isl::dim::in, i, isl::dim::out, i);
  }

  if (Result.is_null()) {
    // { DomainDef[] -> DomainTarget[] }
    Result = computeUseToDefFlowDependency(TargetStmt, DefStmt).reverse();
    simplify(Result);
  }

  return Result;
}

isl::map ZoneAlgorithm::getScalarReachingDefinition(ScopStmt *Stmt) {
  auto &Result = ScalarReachDefZone[Stmt];
  if (!Result.is_null())
    return Result;

  auto Domain = getDomainFor(Stmt);
  Result = computeScalarReachingDefinition(Schedule, Domain, false, true);
  simplify(Result);

  return Result;
}

isl::map ZoneAlgorithm::getScalarReachingDefinition(isl::set DomainDef) {
  auto DomId = DomainDef.get_tuple_id();
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(DomId.get()));

  auto StmtResult = getScalarReachingDefinition(Stmt);

  return StmtResult.intersect_range(DomainDef);
}

isl::map ZoneAlgorithm::makeUnknownForDomain(ScopStmt *Stmt) const {
  return ::makeUnknownForDomain(getDomainFor(Stmt));
}

isl::id ZoneAlgorithm::makeValueId(Value *V) {
  if (!V)
    return {};

  auto &Id = ValueIds[V];
  if (Id.is_null()) {
    auto Name = getIslCompatibleName("Val_", V, ValueIds.size() - 1,
                                     std::string(), UseInstructionNames);
    Id = isl::id::alloc(IslCtx.get(), Name.c_str(), V);
  }
  return Id;
}

isl::space ZoneAlgorithm::makeValueSpace(Value *V) {
  auto Result = ParamSpace.set_from_params();
  return Result.set_tuple_id(isl::dim::set, makeValueId(V));
}

isl::set ZoneAlgorithm::makeValueSet(Value *V) {
  auto Space = makeValueSpace(V);
  return isl::set::universe(Space);
}

isl::map ZoneAlgorithm::makeValInst(Value *Val, ScopStmt *UserStmt, Loop *Scope,
                                    bool IsCertain) {
  // If the definition/write is conditional, the value at the location could
  // be either the written value or the old value. Since we cannot know which
  // one, consider the value to be unknown.
  if (!IsCertain)
    return makeUnknownForDomain(UserStmt);

  auto DomainUse = getDomainFor(UserStmt);
  auto VUse = VirtualUse::create(S, UserStmt, Scope, Val, true);
  switch (VUse.getKind()) {
  case VirtualUse::Constant:
  case VirtualUse::Block:
  case VirtualUse::Hoisted:
  case VirtualUse::ReadOnly: {
    // The definition does not depend on the statement which uses it.
    auto ValSet = makeValueSet(Val);
    return isl::map::from_domain_and_range(DomainUse, ValSet);
  }

  case VirtualUse::Synthesizable: {
    auto *ScevExpr = VUse.getScevExpr();
    auto UseDomainSpace = DomainUse.get_space();

    // Construct the SCEV space.
    // TODO: Add only the induction variables referenced in SCEVAddRecExpr
    // expressions, not just all of them.
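    //
    // For illustration (hypothetical): for a use of the synthesizable SCEV
    // {0,+,1}<%loop> in Stmt[i], the map built below is the identity-like
    // { Stmt[i] -> SCEV[i] }, where the SCEV tuple's id has no name but
    // carries a pointer to the SCEV expression.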
    auto ScevId = isl::manage(isl_id_alloc(UseDomainSpace.ctx().get(), nullptr,
                                           const_cast<SCEV *>(ScevExpr)));

    auto ScevSpace = UseDomainSpace.drop_dims(isl::dim::set, 0, 0);
    ScevSpace = ScevSpace.set_tuple_id(isl::dim::set, ScevId);

    // { DomainUse[] -> ScevExpr[] }
    auto ValInst =
        isl::map::identity(UseDomainSpace.map_from_domain_and_range(ScevSpace));
    return ValInst;
  }

  case VirtualUse::Intra: {
    // Definition and use are in the same statement. We do not need to compute
    // a reaching definition.

    // { llvm::Value }
    auto ValSet = makeValueSet(Val);

    // { UserDomain[] -> llvm::Value }
    auto ValInstSet = isl::map::from_domain_and_range(DomainUse, ValSet);

    // { UserDomain[] -> [UserDomain[] -> llvm::Value] }
    auto Result = ValInstSet.domain_map().reverse();
    simplify(Result);
    return Result;
  }

  case VirtualUse::Inter: {
    // The value is defined in a different statement.

    auto *Inst = cast<Instruction>(Val);
    auto *ValStmt = S->getStmtFor(Inst);

    // If the llvm::Value is defined in a removed Stmt, we cannot derive its
    // domain. We could use an arbitrary statement, but this could result in
    // different ValInst[] for the same llvm::Value.
    if (!ValStmt)
      return ::makeUnknownForDomain(DomainUse);

    // { DomainUse[] -> DomainDef[] }
    auto UsedInstance = getDefToTarget(ValStmt, UserStmt).reverse();

    // { llvm::Value }
    auto ValSet = makeValueSet(Val);

    // { DomainUse[] -> llvm::Value[] }
    auto ValInstSet = isl::map::from_domain_and_range(DomainUse, ValSet);

    // { DomainUse[] -> [DomainDef[] -> llvm::Value] }
    auto Result = UsedInstance.range_product(ValInstSet);

    simplify(Result);
    return Result;
  }
  }
  llvm_unreachable("Unhandled use type");
}
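
// For illustration (hypothetical names): for a value %x defined and used in
// Stmt[i], makeValInst() returns the Intra-case map
//
//   { Stmt[i] -> [Stmt[i] -> Val_x[]] }
//
// whereas for a use of %x in a statement Use[i] of the same loop iteration it
// returns the Inter-case map { Use[i] -> [Stmt[i] -> Val_x[]] }.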

/// Remove all computed PHIs from @p Input and replace them by their incoming
/// values.
///
/// @param Input        { [] -> ValInst[] }
/// @param ComputedPHIs Set of PHIs that are replaced. Its ValInst must appear
///                     on the LHS of @p NormalizeMap.
/// @param NormalizeMap { ValInst[] -> ValInst[] }
static isl::union_map normalizeValInst(isl::union_map Input,
                                       const DenseSet<PHINode *> &ComputedPHIs,
                                       isl::union_map NormalizeMap) {
  isl::union_map Result = isl::union_map::empty(Input.ctx());
  for (isl::map Map : Input.get_map_list()) {
    isl::space Space = Map.get_space();
    isl::space RangeSpace = Space.range();

    // Instructions within the SCoP are always wrapped. Non-wrapped tuples
    // are therefore invariant in the SCoP and don't need normalization.
    if (!RangeSpace.is_wrapping()) {
      Result = Result.unite(Map);
      continue;
    }

    auto *PHI = dyn_cast<PHINode>(static_cast<Value *>(
        RangeSpace.unwrap().get_tuple_id(isl::dim::out).get_user()));

    // If no normalization is necessary, then the ValInst stands for itself.
    if (!ComputedPHIs.count(PHI)) {
      Result = Result.unite(Map);
      continue;
    }

    // Otherwise, apply the normalization.
    isl::union_map Mapped = isl::union_map(Map).apply_range(NormalizeMap);
    Result = Result.unite(Mapped);
    NumPHINormialization++;
  }
  return Result;
}

isl::union_map ZoneAlgorithm::makeNormalizedValInst(llvm::Value *Val,
                                                    ScopStmt *UserStmt,
                                                    llvm::Loop *Scope,
                                                    bool IsCertain) {
  isl::map ValInst = makeValInst(Val, UserStmt, Scope, IsCertain);
  isl::union_map Normalized =
      normalizeValInst(ValInst, ComputedPHIs, NormalizeMap);
  return Normalized;
}

bool ZoneAlgorithm::isCompatibleAccess(MemoryAccess *MA) {
  if (!MA)
    return false;
  if (!MA->isLatestArrayKind())
    return false;
  Instruction *AccInst = MA->getAccessInstruction();
  return isa<StoreInst>(AccInst) || isa<LoadInst>(AccInst);
}

bool ZoneAlgorithm::isNormalizable(MemoryAccess *MA) {
  assert(MA->isRead());

  // Exclude ExitPHIs; we are assuming that a normalizable PHI has a READ
  // MemoryAccess.
  if (!MA->isOriginalPHIKind())
    return false;

  // Exclude recursive PHIs; normalizing them would require a transitive
  // closure.
  auto *PHI = cast<PHINode>(MA->getAccessInstruction());
  if (RecursivePHIs.count(PHI))
    return false;

  // Ensure that each incoming value can be represented by a ValInst[].
  // A value written by a statement that is associated with multiple incoming
  // values is represented by the PHI itself, but we do not handle this case
  // yet (especially in isNormalized()) when normalizing.
  const ScopArrayInfo *SAI = MA->getOriginalScopArrayInfo();
  auto Incomings = S->getPHIIncomings(SAI);
  for (MemoryAccess *Incoming : Incomings) {
    if (Incoming->getIncoming().size() != 1)
      return false;
  }

  return true;
}

isl::boolean ZoneAlgorithm::isNormalized(isl::map Map) {
  isl::space Space = Map.get_space();
  isl::space RangeSpace = Space.range();

  isl::boolean IsWrapping = RangeSpace.is_wrapping();
  if (!IsWrapping.is_true())
    return !IsWrapping;
  isl::space Unwrapped = RangeSpace.unwrap();

  isl::id OutTupleId = Unwrapped.get_tuple_id(isl::dim::out);
  if (OutTupleId.is_null())
    return isl::boolean();
  auto *PHI = dyn_cast<PHINode>(static_cast<Value *>(OutTupleId.get_user()));
  if (!PHI)
    return true;

  isl::id InTupleId = Unwrapped.get_tuple_id(isl::dim::in);
  if (InTupleId.is_null())
    return isl::boolean();
  auto *IncomingStmt = static_cast<ScopStmt *>(InTupleId.get_user());
  MemoryAccess *PHIRead = IncomingStmt->lookupPHIReadOf(PHI);
  if (!isNormalizable(PHIRead))
    return true;

  return false;
}

isl::boolean ZoneAlgorithm::isNormalized(isl::union_map UMap) {
  isl::boolean Result = true;
  for (isl::map Map : UMap.get_map_list()) {
    Result = isNormalized(Map);
    if (Result.is_true())
      continue;
    break;
  }
  return Result;
}

void ZoneAlgorithm::computeCommon() {
  AllReads = makeEmptyUnionMap();
  AllMayWrites = makeEmptyUnionMap();
  AllMustWrites = makeEmptyUnionMap();
  AllWriteValInst = makeEmptyUnionMap();
  AllReadValInst = makeEmptyUnionMap();

  // Default to empty, i.e. no normalization/replacement is taking place. Call
  // computeNormalizedPHIs() to initialize.
  NormalizeMap = makeEmptyUnionMap();
  ComputedPHIs.clear();

  for (auto &Stmt : *S) {
    for (auto *MA : Stmt) {
      if (!MA->isLatestArrayKind())
        continue;

      if (MA->isRead())
        addArrayReadAccess(MA);

      if (MA->isWrite())
        addArrayWriteAccess(MA);
    }
  }

  // { DomainWrite[] -> Element[] }
  AllWrites = AllMustWrites.unite(AllMayWrites);

  // { [Element[] -> Zone[]] -> DomainWrite[] }
  WriteReachDefZone =
      computeReachingDefinition(Schedule, AllWrites, false, true);
  simplify(WriteReachDefZone);
}

void ZoneAlgorithm::computeNormalizedPHIs() {
  // Determine which PHIs can reference themselves. They are excluded from
  // normalization to avoid problems with transitive closures.
  for (ScopStmt &Stmt : *S) {
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isPHIKind())
        continue;
      if (!MA->isRead())
        continue;

      // TODO: Can be more efficient since isRecursivePHI can theoretically
      // determine recursiveness for multiple values and/or cache results.
      auto *PHI = cast<PHINode>(MA->getAccessInstruction());
      if (isRecursivePHI(PHI)) {
        NumRecursivePHIs++;
        RecursivePHIs.insert(PHI);
      }
    }
  }

  // { PHIValInst[] -> IncomingValInst[] }
  isl::union_map AllPHIMaps = makeEmptyUnionMap();

  // Discover new PHIs and try to normalize them.
  DenseSet<PHINode *> AllPHIs;
  for (ScopStmt &Stmt : *S) {
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isOriginalPHIKind())
        continue;
      if (!MA->isRead())
        continue;
      if (!isNormalizable(MA))
        continue;

      auto *PHI = cast<PHINode>(MA->getAccessInstruction());
      const ScopArrayInfo *SAI = MA->getOriginalScopArrayInfo();

      // Determine which instance of the PHI statement corresponds to which
      // incoming value. Skip if we cannot determine PHI predecessors.
      // { PHIDomain[] -> IncomingDomain[] }
      isl::union_map PerPHI = computePerPHI(SAI);
      if (PerPHI.is_null())
        continue;

      // { PHIDomain[] -> PHIValInst[] }
      isl::map PHIValInst = makeValInst(PHI, &Stmt, Stmt.getSurroundingLoop());

      // { IncomingDomain[] -> IncomingValInst[] }
      isl::union_map IncomingValInsts = makeEmptyUnionMap();

      // Get all incoming values.
      for (MemoryAccess *MA : S->getPHIIncomings(SAI)) {
        ScopStmt *IncomingStmt = MA->getStatement();

        auto Incoming = MA->getIncoming();
        assert(Incoming.size() == 1 && "The incoming value must be "
                                       "representable by something else than "
                                       "the PHI itself");
        Value *IncomingVal = Incoming[0].second;

        // { IncomingDomain[] -> IncomingValInst[] }
        isl::map IncomingValInst = makeValInst(
            IncomingVal, IncomingStmt, IncomingStmt->getSurroundingLoop());

        IncomingValInsts = IncomingValInsts.unite(IncomingValInst);
      }

      // { PHIValInst[] -> IncomingValInst[] }
      isl::union_map PHIMap =
          PerPHI.apply_domain(PHIValInst).apply_range(IncomingValInsts);
      assert(!PHIMap.is_single_valued().is_false());

      // Resolve transitiveness: The incoming value of the newly discovered PHI
      // may reference a previously normalized PHI. At the same time, already
      // normalized PHIs might be normalized to the new PHI. At the end, none of
      // the PHIs may appear on the right-hand side of the normalization map.
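      //
      // For illustration (hypothetical): assume %phi.a was already normalized
      // to the ValInst of its incoming value %x, and the incoming value of the
      // newly discovered %phi.b is %phi.a. The first call below rewrites the
      // new PHIMap so that %phi.b maps directly to %x's ValInst; the second
      // call rewrites any entry of AllPHIMaps whose right-hand side still
      // refers to %phi.b.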
      PHIMap = normalizeValInst(PHIMap, AllPHIs, AllPHIMaps);
      AllPHIs.insert(PHI);
      AllPHIMaps = normalizeValInst(AllPHIMaps, AllPHIs, PHIMap);

      AllPHIMaps = AllPHIMaps.unite(PHIMap);
      NumNormalizablePHIs++;
    }
  }
  simplify(AllPHIMaps);

  // Apply the normalization.
  ComputedPHIs = AllPHIs;
  NormalizeMap = AllPHIMaps;

  assert(NormalizeMap.is_null() || isNormalized(NormalizeMap));
}

void ZoneAlgorithm::printAccesses(llvm::raw_ostream &OS, int Indent) const {
  OS.indent(Indent) << "After accesses {\n";
  for (auto &Stmt : *S) {
    OS.indent(Indent + 4) << Stmt.getBaseName() << "\n";
    for (auto *MA : Stmt)
      MA->print(OS);
  }
  OS.indent(Indent) << "}\n";
}

isl::union_map ZoneAlgorithm::computeKnownFromMustWrites() const {
  // { [Element[] -> Zone[]] -> [Element[] -> DomainWrite[]] }
  isl::union_map EltReachdDef = distributeDomain(WriteReachDefZone.curry());

  // { [Element[] -> DomainWrite[]] -> ValInst[] }
  isl::union_map AllKnownWriteValInst = filterKnownValInst(AllWriteValInst);

  // { [Element[] -> Zone[]] -> ValInst[] }
  return EltReachdDef.apply_range(AllKnownWriteValInst);
}

isl::union_map ZoneAlgorithm::computeKnownFromLoad() const {
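  // The idea (illustrative): whatever value a load reads must already have
  // been the element's content since the write that reaches the load, and it
  // stays the content until the next overwrite. For example (hypothetical),
  // if A[0] is written at timepoint [0], loaded into %v at [3] and only
  // overwritten at [10], then %v's ValInst is known for A[0] in the whole
  // zone between the two writes, not just at the load itself.
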
  // { Element[] }
  isl::union_set AllAccessedElts = AllReads.range().unite(AllWrites.range());

  // { Element[] -> Scatter[] }
  isl::union_map EltZoneUniverse = isl::union_map::from_domain_and_range(
      AllAccessedElts, isl::set::universe(ScatterSpace));

  // This assumes there are no "holes" in
  // isl_union_map_domain(WriteReachDefZone); alternatively, compute the zones
  // before the first write and for elements that are not written at all.
  // { [Element[] -> Scatter[]] }
  isl::union_set NonReachDef =
      EltZoneUniverse.wrap().subtract(WriteReachDefZone.domain());

  // { [Element[] -> Zone[]] -> ReachDefId[] }
  isl::union_map DefZone =
      WriteReachDefZone.unite(isl::union_map::from_domain(NonReachDef));

  // { [Element[] -> Scatter[]] -> Element[] }
  isl::union_map EltZoneElt = EltZoneUniverse.domain_map();

  // { [Element[] -> Zone[]] -> [Element[] -> ReachDefId[]] }
  isl::union_map DefZoneEltDefId = EltZoneElt.range_product(DefZone);

  // { Element[] -> [Zone[] -> ReachDefId[]] }
  isl::union_map EltDefZone = DefZone.curry();

  // { [Element[] -> Zone[]] -> [Element[] -> ReachDefId[]] }
  isl::union_map EltZoneEltDefid = distributeDomain(EltDefZone);

  // { [Element[] -> Scatter[]] -> DomainRead[] }
  isl::union_map Reads = AllReads.range_product(Schedule).reverse();

  // { [Element[] -> Scatter[]] -> [Element[] -> DomainRead[]] }
  isl::union_map ReadsElt = EltZoneElt.range_product(Reads);

  // { [Element[] -> Scatter[]] -> ValInst[] }
  isl::union_map ScatterKnown = ReadsElt.apply_range(AllReadValInst);

  // { [Element[] -> ReachDefId[]] -> ValInst[] }
  isl::union_map DefidKnown =
      DefZoneEltDefId.apply_domain(ScatterKnown).reverse();

  // { [Element[] -> Zone[]] -> ValInst[] }
  return DefZoneEltDefId.apply_range(DefidKnown);
}

isl::union_map ZoneAlgorithm::computeKnown(bool FromWrite,
                                           bool FromRead) const {
  isl::union_map Result = makeEmptyUnionMap();

  if (FromWrite)
    Result = Result.unite(computeKnownFromMustWrites());

  if (FromRead)
    Result = Result.unite(computeKnownFromLoad());

  simplify(Result);
  return Result;
}